| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/loss/loss_deformable_detr.py | src/transformers/loss/loss_deformable_detr.py | import torch
import torch.nn as nn
from ..image_transforms import center_to_corners_format
from ..utils import is_scipy_available
from .loss_for_object_detection import (
HungarianMatcher,
ImageLoss,
_set_aux_loss,
generalized_box_iou,
sigmoid_focal_loss,
)
if is_scipy_available():
from scipy.optimize import linear_sum_assignment
class DeformableDetrHungarianMatcher(HungarianMatcher):
@torch.no_grad()
def forward(self, outputs, targets):
"""
Differences:
- out_prob = outputs["logits"].flatten(0, 1).sigmoid() instead of softmax
- class_cost uses alpha and gamma
"""
batch_size, num_queries = outputs["logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
target_ids = torch.cat([v["class_labels"] for v in targets])
target_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost.
alpha = 0.25
gamma = 2.0
neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids]
# Compute the L1 cost between boxes
bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
# Compute the giou cost between boxes
giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
# Final cost matrix
cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class DeformableDetrImageLoss(ImageLoss):
def __init__(self, matcher, num_classes, focal_alpha, losses):
nn.Module.__init__(self)
self.matcher = matcher
self.num_classes = num_classes
self.focal_alpha = focal_alpha
self.losses = losses
# removed logging parameter, which was part of the original implementation
def loss_labels(self, outputs, targets, indices, num_boxes):
"""
Classification loss (binary focal loss). Targets dicts must contain the key "class_labels" containing a tensor
of dim [nb_target_boxes].
"""
if "logits" not in outputs:
raise KeyError("No logits were found in the outputs")
source_logits = outputs["logits"]
idx = self._get_source_permutation_idx(indices)
target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros(
[source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1],
dtype=source_logits.dtype,
layout=source_logits.layout,
device=source_logits.device,
)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:, :, :-1]
loss_ce = (
sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2)
* source_logits.shape[1]
)
losses = {"loss_ce": loss_ce}
return losses
def DeformableDetrForSegmentationLoss(
logits, labels, device, pred_boxes, pred_masks, config, outputs_class=None, outputs_coord=None, **kwargs
):
# First: create the matcher
matcher = HungarianMatcher(class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost)
# Second: create the criterion
losses = ["labels", "boxes", "cardinality", "masks"]
criterion = DeformableDetrImageLoss(
matcher=matcher,
num_classes=config.num_labels,
focal_alpha=config.focal_alpha,
losses=losses,
)
criterion.to(device)
# Third: compute the losses, based on outputs and labels
outputs_loss = {}
outputs_loss["logits"] = logits
outputs_loss["pred_boxes"] = pred_boxes
outputs_loss["pred_masks"] = pred_masks
auxiliary_outputs = None
if config.auxiliary_loss:
auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord)
outputs_loss["auxiliary_outputs"] = auxiliary_outputs
loss_dict = criterion(outputs_loss, labels)
# Fourth: compute total loss, as a weighted sum of the various losses
weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient}
weight_dict["loss_giou"] = config.giou_loss_coefficient
weight_dict["loss_mask"] = config.mask_loss_coefficient
weight_dict["loss_dice"] = config.dice_loss_coefficient
if config.auxiliary_loss:
aux_weight_dict = {}
for i in range(config.decoder_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)
return loss, loss_dict, auxiliary_outputs
def DeformableDetrForObjectDetectionLoss(
logits, labels, device, pred_boxes, config, outputs_class=None, outputs_coord=None, **kwargs
):
# First: create the matcher
matcher = DeformableDetrHungarianMatcher(
class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost
)
# Second: create the criterion
losses = ["labels", "boxes", "cardinality"]
criterion = DeformableDetrImageLoss(
matcher=matcher,
num_classes=config.num_labels,
focal_alpha=config.focal_alpha,
losses=losses,
)
criterion.to(device)
# Third: compute the losses, based on outputs and labels
outputs_loss = {}
auxiliary_outputs = None
outputs_loss["logits"] = logits
outputs_loss["pred_boxes"] = pred_boxes
if config.auxiliary_loss:
auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord)
outputs_loss["auxiliary_outputs"] = auxiliary_outputs
loss_dict = criterion(outputs_loss, labels)
# Fourth: compute total loss, as a weighted sum of the various losses
weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient}
weight_dict["loss_giou"] = config.giou_loss_coefficient
if config.auxiliary_loss:
aux_weight_dict = {}
for i in range(config.decoder_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)
return loss, loss_dict, auxiliary_outputs
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
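A minimal sketch of how the `DeformableDetrForObjectDetectionLoss` entry point above might be driven with toy tensors. The config here is a plain namespace exposing only the attributes the function reads (placeholder values, not an official configuration), and `scipy` is required for the Hungarian matching; the import path simply mirrors the file path in the row above.

```python
import torch
from types import SimpleNamespace
from transformers.loss.loss_deformable_detr import DeformableDetrForObjectDetectionLoss

# Bare-bones stand-in for a DeformableDetr config: only the fields read by the function above.
config = SimpleNamespace(
    class_cost=1.0, bbox_cost=5.0, giou_cost=2.0,
    num_labels=3, focal_alpha=0.25, auxiliary_loss=False,
    bbox_loss_coefficient=5.0, giou_loss_coefficient=2.0,
)
batch_size, num_queries = 2, 10
logits = torch.randn(batch_size, num_queries, config.num_labels)
pred_boxes = torch.rand(batch_size, num_queries, 4)  # normalized (center_x, center_y, w, h)
labels = [  # one dict per image, following the format described in the loss docstrings
    {"class_labels": torch.tensor([0, 2]), "boxes": torch.rand(2, 4)},
    {"class_labels": torch.tensor([1]), "boxes": torch.rand(1, 4)},
]
loss, loss_dict, _ = DeformableDetrForObjectDetectionLoss(
    logits, labels, torch.device("cpu"), pred_boxes, config
)
print(float(loss), sorted(loss_dict))  # scalar total loss plus per-term dictionary
```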
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/loss/loss_d_fine.py | src/transformers/loss/loss_d_fine.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..utils import is_vision_available
from .loss_for_object_detection import box_iou
from .loss_rt_detr import RTDetrHungarianMatcher, RTDetrLoss
if is_vision_available():
from transformers.image_transforms import center_to_corners_format
def _set_aux_loss(outputs_class, outputs_coord):
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class, outputs_coord)]
def _set_aux_loss2(
outputs_class, outputs_coord, outputs_corners, outputs_ref, teacher_corners=None, teacher_logits=None
):
return [
{
"logits": a,
"pred_boxes": b,
"pred_corners": c,
"ref_points": d,
"teacher_corners": teacher_corners,
"teacher_logits": teacher_logits,
}
for a, b, c, d in zip(outputs_class, outputs_coord, outputs_corners, outputs_ref)
]
def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: float) -> torch.Tensor:
"""
Generates the non-uniform Weighting Function W(n) for bounding box regression.
Args:
max_num_bins (int): Max number of the discrete bins.
up (Tensor): Controls upper bounds of the sequence,
where maximum offset is ±up * H / W.
reg_scale (float): Controls the curvature of the Weighting Function.
Larger values result in flatter weights near the central axis W(max_num_bins/2)=0
and steeper weights at both ends.
Returns:
Tensor: Sequence of Weighting Function.
"""
upper_bound1 = abs(up[0]) * abs(reg_scale)
upper_bound2 = abs(up[0]) * abs(reg_scale) * 2
step = (upper_bound1 + 1) ** (2 / (max_num_bins - 2))
left_values = [-((step) ** i) + 1 for i in range(max_num_bins // 2 - 1, 0, -1)]
right_values = [(step) ** i - 1 for i in range(1, max_num_bins // 2)]
values = [-upper_bound2] + left_values + [torch.zeros_like(up[0][None])] + right_values + [upper_bound2]
values = [v if v.dim() > 0 else v.unsqueeze(0) for v in values]
values = torch.cat(values, 0)
return values
def translate_gt(gt: torch.Tensor, max_num_bins: int, reg_scale: float, up: torch.Tensor):
"""
Decodes bounding box ground truth (GT) values into distribution-based GT representations.
This function maps continuous GT values into discrete distribution bins, which can be used
for regression tasks in object detection models. It calculates the indices of the closest
bins to each GT value and assigns interpolation weights to these bins based on their proximity
to the GT value.
Args:
gt (Tensor): Ground truth bounding box values, shape (N, ).
max_num_bins (int): Maximum number of discrete bins for the distribution.
reg_scale (float): Controls the curvature of the Weighting Function.
up (Tensor): Controls the upper bounds of the Weighting Function.
Returns:
tuple[Tensor, Tensor, Tensor]:
- indices (Tensor): Index of the left bin closest to each GT value, shape (N, ).
- weight_right (Tensor): Weight assigned to the right bin, shape (N, ).
- weight_left (Tensor): Weight assigned to the left bin, shape (N, ).
"""
gt = gt.reshape(-1)
function_values = weighting_function(max_num_bins, up, reg_scale)
# Find the closest left-side indices for each value
diffs = function_values.unsqueeze(0) - gt.unsqueeze(1)
mask = diffs <= 0
closest_left_indices = torch.sum(mask, dim=1) - 1
# Calculate the weights for the interpolation
indices = closest_left_indices.float()
weight_right = torch.zeros_like(indices)
weight_left = torch.zeros_like(indices)
valid_idx_mask = (indices >= 0) & (indices < max_num_bins)
valid_indices = indices[valid_idx_mask].long()
# Obtain distances
left_values = function_values[valid_indices]
right_values = function_values[valid_indices + 1]
left_diffs = torch.abs(gt[valid_idx_mask] - left_values)
right_diffs = torch.abs(right_values - gt[valid_idx_mask])
# Valid weights
weight_right[valid_idx_mask] = left_diffs / (left_diffs + right_diffs)
weight_left[valid_idx_mask] = 1.0 - weight_right[valid_idx_mask]
# Invalid weights (out of range)
invalid_idx_mask_neg = indices < 0
weight_right[invalid_idx_mask_neg] = 0.0
weight_left[invalid_idx_mask_neg] = 1.0
indices[invalid_idx_mask_neg] = 0.0
invalid_idx_mask_pos = indices >= max_num_bins
weight_right[invalid_idx_mask_pos] = 1.0
weight_left[invalid_idx_mask_pos] = 0.0
indices[invalid_idx_mask_pos] = max_num_bins - 0.1
return indices, weight_right, weight_left
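# --- Illustrative sketch (an editor's addition, not part of the upstream file) ---
# A tiny example of what `translate_gt` produces: each continuous target is expressed as a
# pair of neighbouring bins of `weighting_function` plus interpolation weights. The numbers
# below are arbitrary and only meant to show the shapes involved.
def _translate_gt_example():
    up = torch.tensor([0.5])
    gt = torch.tensor([0.3, -1.2, 4.0])  # the last value falls outside the bin range on purpose
    indices, weight_right, weight_left = translate_gt(gt, max_num_bins=32, reg_scale=4.0, up=up)
    # For in-range targets, gt is reconstructed (approximately) as
    # W(indices) * weight_left + W(indices + 1) * weight_right.
    return indices, weight_right, weight_left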
def bbox2distance(points, bbox, max_num_bins, reg_scale, up, eps=0.1):
"""
Converts bounding box coordinates to distances from a reference point.
Args:
points (Tensor): (n, 4) [x, y, w, h], where (x, y) is the center.
bbox (Tensor): (n, 4) bounding boxes in "xyxy" format.
max_num_bins (int): Maximum number of discrete bins.
reg_scale (float): Controls the curvature of W(n).
up (Tensor): Controls the upper bounds of W(n).
eps (float): Small value to ensure target < max_num_bins.
Returns:
Tensor: Decoded distances.
"""
reg_scale = abs(reg_scale)
left = (points[:, 0] - bbox[:, 0]) / (points[..., 2] / reg_scale + 1e-16) - 0.5 * reg_scale
top = (points[:, 1] - bbox[:, 1]) / (points[..., 3] / reg_scale + 1e-16) - 0.5 * reg_scale
right = (bbox[:, 2] - points[:, 0]) / (points[..., 2] / reg_scale + 1e-16) - 0.5 * reg_scale
bottom = (bbox[:, 3] - points[:, 1]) / (points[..., 3] / reg_scale + 1e-16) - 0.5 * reg_scale
four_lens = torch.stack([left, top, right, bottom], -1)
four_lens, weight_right, weight_left = translate_gt(four_lens, max_num_bins, reg_scale, up)
if max_num_bins is not None:
four_lens = four_lens.clamp(min=0, max=max_num_bins - eps)
return four_lens.reshape(-1).detach(), weight_right.detach(), weight_left.detach()
class DFineLoss(RTDetrLoss):
"""
This class computes the losses for D-FINE. The process happens in two steps: 1) we compute hungarian assignment
between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth /
prediction (supervise class and box).
Args:
matcher (`DetrHungarianMatcher`):
Module able to compute a matching between targets and proposals.
weight_dict (`Dict`):
Dictionary relating each loss to its weight. These weights are configured in `DFineConfig` as
`weight_loss_vfl`, `weight_loss_bbox`, `weight_loss_giou`, `weight_loss_fgl`, `weight_loss_ddf`
losses (`list[str]`):
List of all the losses to be applied. See `get_loss` for a list of all available losses.
alpha (`float`):
Parameter alpha used to compute the focal loss.
gamma (`float`):
Parameter gamma used to compute the focal loss.
eos_coef (`float`):
Relative classification weight applied to the no-object category.
num_classes (`int`):
Number of object categories, omitting the special no-object category.
"""
def __init__(self, config):
super().__init__(config)
self.matcher = RTDetrHungarianMatcher(config)
self.max_num_bins = config.max_num_bins
self.weight_dict = {
"loss_vfl": config.weight_loss_vfl,
"loss_bbox": config.weight_loss_bbox,
"loss_giou": config.weight_loss_giou,
"loss_fgl": config.weight_loss_fgl,
"loss_ddf": config.weight_loss_ddf,
}
self.losses = ["vfl", "boxes", "local"]
self.reg_scale = config.reg_scale
self.up = nn.Parameter(torch.tensor([config.up]), requires_grad=False)
def unimodal_distribution_focal_loss(
self, pred, label, weight_right, weight_left, weight=None, reduction="sum", avg_factor=None
):
dis_left = label.long()
dis_right = dis_left + 1
loss = F.cross_entropy(pred, dis_left, reduction="none") * weight_left.reshape(-1) + F.cross_entropy(
pred, dis_right, reduction="none"
) * weight_right.reshape(-1)
if weight is not None:
weight = weight.float()
loss = loss * weight
if avg_factor is not None:
loss = loss.sum() / avg_factor
elif reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
return loss
def loss_local(self, outputs, targets, indices, num_boxes, T=5):
"""Compute Fine-Grained Localization (FGL) Loss
and Decoupled Distillation Focal (DDF) Loss."""
losses = {}
if "pred_corners" in outputs:
idx = self._get_source_permutation_idx(indices)
target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
pred_corners = outputs["pred_corners"][idx].reshape(-1, (self.max_num_bins + 1))
ref_points = outputs["ref_points"][idx].detach()
with torch.no_grad():
self.fgl_targets = bbox2distance(
ref_points,
center_to_corners_format(target_boxes),
self.max_num_bins,
self.reg_scale,
self.up,
)
target_corners, weight_right, weight_left = self.fgl_targets
ious = torch.diag(
box_iou(center_to_corners_format(outputs["pred_boxes"][idx]), center_to_corners_format(target_boxes))[
0
]
)
weight_targets = ious.unsqueeze(-1).repeat(1, 1, 4).reshape(-1).detach()
losses["loss_fgl"] = self.unimodal_distribution_focal_loss(
pred_corners,
target_corners,
weight_right,
weight_left,
weight_targets,
avg_factor=num_boxes,
)
pred_corners = outputs["pred_corners"].reshape(-1, (self.max_num_bins + 1))
target_corners = outputs["teacher_corners"].reshape(-1, (self.max_num_bins + 1))
if torch.equal(pred_corners, target_corners):
losses["loss_ddf"] = pred_corners.sum() * 0
else:
weight_targets_local = outputs["teacher_logits"].sigmoid().max(dim=-1)[0]
mask = torch.zeros_like(weight_targets_local, dtype=torch.bool)
mask[idx] = True
mask = mask.unsqueeze(-1).repeat(1, 1, 4).reshape(-1)
weight_targets_local[idx] = ious.reshape_as(weight_targets_local[idx]).to(weight_targets_local.dtype)
weight_targets_local = weight_targets_local.unsqueeze(-1).repeat(1, 1, 4).reshape(-1).detach()
loss_match_local = (
weight_targets_local
* (T**2)
* (
nn.KLDivLoss(reduction="none")(
F.log_softmax(pred_corners / T, dim=1),
F.softmax(target_corners.detach() / T, dim=1),
)
).sum(-1)
)
batch_scale = 1 / outputs["pred_boxes"].shape[0] # it should be refined
self.num_pos, self.num_neg = (
(mask.sum() * batch_scale) ** 0.5,
((~mask).sum() * batch_scale) ** 0.5,
)
loss_match_local1 = loss_match_local[mask].mean() if mask.any() else 0
loss_match_local2 = loss_match_local[~mask].mean() if (~mask).any() else 0
losses["loss_ddf"] = (loss_match_local1 * self.num_pos + loss_match_local2 * self.num_neg) / (
self.num_pos + self.num_neg
)
return losses
def get_loss(self, loss, outputs, targets, indices, num_boxes):
loss_map = {
"cardinality": self.loss_cardinality,
"local": self.loss_local,
"boxes": self.loss_boxes,
"focal": self.loss_labels_focal,
"vfl": self.loss_labels_vfl,
}
if loss not in loss_map:
raise ValueError(f"Loss {loss} not supported")
return loss_map[loss](outputs, targets, indices, num_boxes)
def DFineForObjectDetectionLoss(
logits,
labels,
device,
pred_boxes,
config,
outputs_class=None,
outputs_coord=None,
enc_topk_logits=None,
enc_topk_bboxes=None,
denoising_meta_values=None,
predicted_corners=None,
initial_reference_points=None,
**kwargs,
):
criterion = DFineLoss(config)
criterion.to(device)
# Second: compute the losses, based on outputs and labels
outputs_loss = {}
outputs_loss["logits"] = logits
outputs_loss["pred_boxes"] = pred_boxes.clamp(min=0, max=1)
auxiliary_outputs = None
if config.auxiliary_loss:
if denoising_meta_values is not None:
dn_out_coord, outputs_coord = torch.split(
outputs_coord.clamp(min=0, max=1), denoising_meta_values["dn_num_split"], dim=2
)
dn_out_class, outputs_class = torch.split(outputs_class, denoising_meta_values["dn_num_split"], dim=2)
dn_out_corners, out_corners = torch.split(predicted_corners, denoising_meta_values["dn_num_split"], dim=2)
dn_out_refs, out_refs = torch.split(initial_reference_points, denoising_meta_values["dn_num_split"], dim=2)
auxiliary_outputs = _set_aux_loss2(
outputs_class[:, :-1].transpose(0, 1),
outputs_coord[:, :-1].transpose(0, 1),
out_corners[:, :-1].transpose(0, 1),
out_refs[:, :-1].transpose(0, 1),
out_corners[:, -1],
outputs_class[:, -1],
)
outputs_loss["auxiliary_outputs"] = auxiliary_outputs
outputs_loss["auxiliary_outputs"].extend(
_set_aux_loss([enc_topk_logits], [enc_topk_bboxes.clamp(min=0, max=1)])
)
dn_auxiliary_outputs = _set_aux_loss2(
dn_out_class.transpose(0, 1),
dn_out_coord.transpose(0, 1),
dn_out_corners.transpose(0, 1),
dn_out_refs.transpose(0, 1),
dn_out_corners[:, -1],
dn_out_class[:, -1],
)
outputs_loss["dn_auxiliary_outputs"] = dn_auxiliary_outputs
outputs_loss["denoising_meta_values"] = denoising_meta_values
loss_dict = criterion(outputs_loss, labels)
loss = sum(loss_dict.values())
return loss, loss_dict, auxiliary_outputs
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
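A minimal sketch of the distance-to-bin conversion implemented by `bbox2distance` in the file above, using made-up reference points and boxes; the import path mirrors the file path in the row and is an assumption for illustration.

```python
import torch
from transformers.loss.loss_d_fine import bbox2distance

points = torch.tensor([[0.5, 0.5, 0.2, 0.2]])  # reference (x, y, w, h)
bbox = torch.tensor([[0.4, 0.4, 0.6, 0.6]])    # target box (x1, y1, x2, y2)
up = torch.tensor([0.5])
targets, weight_right, weight_left = bbox2distance(points, bbox, max_num_bins=32, reg_scale=4.0, up=up)
print(targets.shape)  # torch.Size([4]): one discretized distance per box side (left, top, right, bottom)
```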
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/loss/loss_for_object_detection.py | src/transformers/loss/loss_for_object_detection.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch import Tensor
from ..utils import is_accelerate_available, is_scipy_available, is_vision_available, requires_backends
if is_accelerate_available():
from accelerate import PartialState
from accelerate.utils import reduce
if is_scipy_available():
from scipy.optimize import linear_sum_assignment
if is_vision_available():
from transformers.image_transforms import center_to_corners_format
def dice_loss(inputs, targets, num_boxes):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs (0 for the negative class and 1 for the positive
class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_boxes
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
"""
Loss used in RetinaNet for dense detection: https://huggingface.co/papers/1708.02002.
Args:
inputs (`torch.FloatTensor` of arbitrary shape):
The predictions for each example.
targets (`torch.FloatTensor` with the same shape as `inputs`)
A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class
and 1 for the positive class).
alpha (`float`, *optional*, defaults to `0.25`):
Optional weighting factor in the range (0,1) to balance positive vs. negative examples.
gamma (`int`, *optional*, defaults to `2`):
Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
# add modulating factor
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
return loss.mean(1).sum() / num_boxes
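# --- Illustrative sketch (an editor's addition, not part of the upstream file) ---
# Minimal call of `sigmoid_focal_loss` on toy logits/targets, just to show the expected
# shapes: predictions and binary targets share a shape, and the result is a scalar
# normalized by `num_boxes`.
def _sigmoid_focal_loss_example():
    logits = torch.tensor([[2.0, -1.0], [0.5, 0.0]])
    targets = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
    return sigmoid_focal_loss(logits, targets, num_boxes=2)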
# taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py
class ImageLoss(nn.Module):
"""
This class computes the losses for DetrForObjectDetection/DetrForSegmentation. The process happens in two steps: 1)
we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair
of matched ground-truth / prediction (supervise class and box).
A note on the `num_classes` argument (copied from original repo in detr.py): "the naming of the `num_classes`
parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is
the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to
be 91. As another example, for a dataset that has a single class with `id` 1, you should pass `num_classes` to be 2
(`max_obj_id` + 1). For more details on this, check the following discussion
https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223"
Args:
matcher (`DetrHungarianMatcher`):
Module able to compute a matching between targets and proposals.
num_classes (`int`):
Number of object categories, omitting the special no-object category.
eos_coef (`float`):
Relative classification weight applied to the no-object category.
losses (`list[str]`):
List of all the losses to be applied. See `get_loss` for a list of all available losses.
"""
def __init__(self, matcher, num_classes, eos_coef, losses):
super().__init__()
self.matcher = matcher
self.num_classes = num_classes
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# removed logging parameter, which was part of the original implementation
def loss_labels(self, outputs, targets, indices, num_boxes):
"""
Classification loss (NLL). Targets dicts must contain the key "class_labels" containing a tensor of dim
[nb_target_boxes].
"""
if "logits" not in outputs:
raise KeyError("No logits were found in the outputs")
source_logits = outputs["logits"]
idx = self._get_source_permutation_idx(indices)
target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
"""
Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients.
"""
logits = outputs["logits"]
device = logits.device
target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-object" (which is the last class)
card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
losses = {"cardinality_error": card_err}
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""
Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.
Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes
are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
if "pred_boxes" not in outputs:
raise KeyError("No predicted boxes found in outputs")
idx = self._get_source_permutation_idx(indices)
source_boxes = outputs["pred_boxes"][idx]
target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none")
losses = {}
losses["loss_bbox"] = loss_bbox.sum() / num_boxes
loss_giou = 1 - torch.diag(
generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))
)
losses["loss_giou"] = loss_giou.sum() / num_boxes
return losses
def loss_masks(self, outputs, targets, indices, num_boxes):
"""
Compute the losses related to the masks: the focal loss and the dice loss.
Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w].
"""
if "pred_masks" not in outputs:
raise KeyError("No predicted masks found in outputs")
source_idx = self._get_source_permutation_idx(indices)
target_idx = self._get_target_permutation_idx(indices)
source_masks = outputs["pred_masks"]
source_masks = source_masks[source_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()
target_masks = target_masks.to(source_masks)
target_masks = target_masks[target_idx]
# upsample predictions to the target size
source_masks = nn.functional.interpolate(
source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False
)
source_masks = source_masks[:, 0].flatten(1)
target_masks = target_masks.flatten(1)
target_masks = target_masks.view(source_masks.shape)
losses = {
"loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes),
"loss_dice": dice_loss(source_masks, target_masks, num_boxes),
}
return losses
def _get_source_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
source_idx = torch.cat([source for (source, _) in indices])
return batch_idx, source_idx
def _get_target_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
target_idx = torch.cat([target for (_, target) in indices])
return batch_idx, target_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes):
loss_map = {
"labels": self.loss_labels,
"cardinality": self.loss_cardinality,
"boxes": self.loss_boxes,
"masks": self.loss_masks,
}
if loss not in loss_map:
raise ValueError(f"Loss {loss} not supported")
return loss_map[loss](outputs, targets, indices, num_boxes)
def forward(self, outputs, targets):
"""
This performs the loss computation.
Args:
outputs (`dict`, *optional*):
Dictionary of tensors, see the output specification of the model for the format.
targets (`list[dict]`, *optional*):
List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the
losses applied, see each loss' doc.
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
# Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["class_labels"]) for t in targets)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
world_size = 1
if is_accelerate_available():
if PartialState._shared_state != {}:
num_boxes = reduce(num_boxes)
world_size = PartialState().num_processes
num_boxes = torch.clamp(num_boxes / world_size, min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if "auxiliary_outputs" in outputs:
for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
indices = self.matcher(auxiliary_outputs, targets)
for loss in self.losses:
if loss == "masks":
# Intermediate masks losses are too costly to compute, we ignore them.
continue
l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
# taken from https://github.com/facebookresearch/detr/blob/master/models/matcher.py
class HungarianMatcher(nn.Module):
"""
This class computes an assignment between the targets and the predictions of the network.
For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more
predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
Args:
class_cost:
The relative weight of the classification error in the matching cost.
bbox_cost:
The relative weight of the L1 error of the bounding box coordinates in the matching cost.
giou_cost:
The relative weight of the giou loss of the bounding box in the matching cost.
"""
def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1):
super().__init__()
requires_backends(self, ["scipy"])
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
if class_cost == 0 and bbox_cost == 0 and giou_cost == 0:
raise ValueError("All costs of the Matcher can't be 0")
@torch.no_grad()
def forward(self, outputs, targets):
"""
Args:
outputs (`dict`):
A dictionary that contains at least these entries:
* "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
* "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.
targets (`list[dict]`):
A list of targets (len(targets) = batch_size), where each target is a dict containing:
* "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
ground-truth
objects in the target) containing the class labels
* "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.
Returns:
`list[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
batch_size, num_queries = outputs["logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
target_ids = torch.cat([v["class_labels"] for v in targets])
target_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, it can be omitted.
class_cost = -out_prob[:, target_ids]
# Compute the L1 cost between boxes
bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
# Compute the giou cost between boxes
giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
# Final cost matrix
cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
# below: bounding box utilities taken from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py
def _upcast(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
def box_area(boxes: Tensor) -> Tensor:
"""
Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.
Args:
boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
< x2` and `0 <= y1 < y2`.
Returns:
`torch.FloatTensor`: a tensor containing the area for each box.
"""
boxes = _upcast(boxes)
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2]
inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.
Returns:
`torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
iou, union = box_iou(boxes1, boxes2)
top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2]
area = width_height[:, :, 0] * width_height[:, :, 1]
return iou - (area - union) / area
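# --- Illustrative sketch (an editor's addition, not part of the upstream file) ---
# Quick sanity check of `generalized_box_iou` on corner-format boxes: identical boxes score
# 1.0, while disjoint boxes get a negative value driven by the enclosing-box penalty.
def _generalized_box_iou_example():
    boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
    boxes2 = torch.tensor([[0.0, 0.0, 2.0, 2.0], [4.0, 4.0, 5.0, 5.0]])
    return generalized_box_iou(boxes1, boxes2)  # tensor([[ 1.0, -0.8]])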
# below: taken from https://github.com/facebookresearch/detr/blob/master/util/misc.py#L306
def _max_by_axis(the_list):
# type: (list[list[int]]) -> list[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor:
def __init__(self, tensors, mask: Tensor | None):
self.tensors = tensors
self.mask = mask
def to(self, device):
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: list[Tensor]):
if tensor_list[0].ndim == 3:
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
batch_shape = [len(tensor_list)] + max_size
batch_size, num_channels, height, width = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
else:
raise ValueError("Only 3-dimensional tensors are supported")
return NestedTensor(tensor, mask)
# taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py
def _set_aux_loss(outputs_class, outputs_coord):
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
def ForSegmentationLoss(
logits, labels, device, pred_boxes, pred_masks, config, outputs_class=None, outputs_coord=None, **kwargs
):
# First: create the matcher
matcher = HungarianMatcher(class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost)
# Second: create the criterion
losses = ["labels", "boxes", "cardinality", "masks"]
criterion = ImageLoss(
matcher=matcher,
num_classes=config.num_labels,
eos_coef=config.eos_coefficient,
losses=losses,
)
criterion.to(device)
# Third: compute the losses, based on outputs and labels
outputs_loss = {}
outputs_loss["logits"] = logits
outputs_loss["pred_boxes"] = pred_boxes
outputs_loss["pred_masks"] = pred_masks
auxiliary_outputs = None
if config.auxiliary_loss:
auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord)
outputs_loss["auxiliary_outputs"] = auxiliary_outputs
loss_dict = criterion(outputs_loss, labels)
# Fourth: compute total loss, as a weighted sum of the various losses
weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient}
weight_dict["loss_giou"] = config.giou_loss_coefficient
weight_dict["loss_mask"] = config.mask_loss_coefficient
weight_dict["loss_dice"] = config.dice_loss_coefficient
if config.auxiliary_loss:
aux_weight_dict = {}
for i in range(config.decoder_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)
return loss, loss_dict, auxiliary_outputs
def ForObjectDetectionLoss(
logits, labels, device, pred_boxes, config, outputs_class=None, outputs_coord=None, **kwargs
):
# First: create the matcher
matcher = HungarianMatcher(class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost)
# Second: create the criterion
losses = ["labels", "boxes", "cardinality"]
criterion = ImageLoss(
matcher=matcher,
num_classes=config.num_labels,
eos_coef=config.eos_coefficient,
losses=losses,
)
criterion.to(device)
# Third: compute the losses, based on outputs and labels
outputs_loss = {}
auxiliary_outputs = None
outputs_loss["logits"] = logits
outputs_loss["pred_boxes"] = pred_boxes
if config.auxiliary_loss:
auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord)
outputs_loss["auxiliary_outputs"] = auxiliary_outputs
loss_dict = criterion(outputs_loss, labels)
# Fourth: compute total loss, as a weighted sum of the various losses
weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient}
weight_dict["loss_giou"] = config.giou_loss_coefficient
if config.auxiliary_loss:
aux_weight_dict = {}
for i in range(config.decoder_layers - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)
return loss, loss_dict, auxiliary_outputs
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
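A minimal sketch of running the vanilla `HungarianMatcher` from the file above on a toy batch, showing the `(query_indices, target_indices)` pairs it returns; `scipy` is required, and the import path is assumed from the row's file path.

```python
import torch
from transformers.loss.loss_for_object_detection import HungarianMatcher

matcher = HungarianMatcher(class_cost=1.0, bbox_cost=5.0, giou_cost=2.0)
outputs = {
    "logits": torch.randn(1, 5, 4),     # 1 image, 5 queries, 3 classes + no-object
    "pred_boxes": torch.rand(1, 5, 4),  # normalized (center_x, center_y, w, h)
}
targets = [{"class_labels": torch.tensor([0, 2]), "boxes": torch.rand(2, 4)}]
indices = matcher(outputs, targets)
print(indices)  # one (query_indices, target_indices) pair per image, each of length 2 here
```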
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/loss/__init__.py | src/transformers/loss/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/aqlm.py | src/transformers/integrations/aqlm.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"AQLM (Additive Quantization of Language Model) integration file"
from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
logger = logging.get_logger(__name__)
def replace_with_aqlm_linear(model, modules_to_not_convert: list[str] | None = None, quantization_config=None):
"""
Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
quantization_config (`AqlmConfig`):
The quantization config object that contains the quantization parameters.
"""
from aqlm import QuantizedLinear
has_been_replaced = False
# we need this to correctly materialize the weights during quantization
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
with torch.device("meta"):
if isinstance(module, nn.Linear):
new_module = QuantizedLinear(
module.in_features,
module.out_features,
bias=module.bias is not None,
in_group_size=quantization_config.in_group_size,
out_group_size=quantization_config.out_group_size,
num_codebooks=quantization_config.num_codebooks,
nbits_per_codebook=quantization_config.nbits_per_codebook,
)
new_module.source_cls = type(module)
new_module.requires_grad_(False)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using eetq but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
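A minimal sketch of `replace_with_aqlm_linear` from the file above on a toy model. It assumes the `aqlm` package is installed and that the default `AqlmConfig` group/codebook settings divide the layer sizes chosen below; only the quantization fields read by the function matter here.

```python
import torch.nn as nn
from transformers import AqlmConfig
from transformers.integrations.aqlm import replace_with_aqlm_linear

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
config = AqlmConfig()  # default in/out group sizes and codebook settings
model = replace_with_aqlm_linear(model, modules_to_not_convert=[], quantization_config=config)
print(model)  # the two nn.Linear layers are now aqlm QuantizedLinear modules (materialized on the meta device)
```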
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/eetq.py | src/transformers/integrations/eetq.py | # coding=utf-8
# Copyright 2024 NetEase, Inc. and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
logger = logging.get_logger(__name__)
class EetqQuantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self, input_dict: dict[str, list[torch.Tensor]], full_layer_name: str | None = None, **kwargs
) -> dict[str, torch.Tensor]:
_, value = tuple(input_dict.items())[0]
value = value[0]
value_device = value.device
int8_weight = torch.t(value).contiguous().cpu()
int8_weight, scales = eetq_kernels_hub.quant_weights(int8_weight, torch.int8, False)
int8_weight = int8_weight.to(value_device)
scales = scales.to(value_device)
return {full_layer_name: int8_weight, f"{full_layer_name}_scales": scales}
class EetqLinearMMFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weight, scales, bias=None):
# The forward pass can use ctx.
ctx.save_for_backward(x, weight, scales, bias)
output = eetq_kernels_hub.w8_a16_gemm(x, weight, scales)
output = output + bias if bias is not None else output
return output
@staticmethod
def backward(ctx, grad_output):
input, weight, scales, bias = ctx.saved_tensors
identity = torch.eye(weight.shape[0]).to(weight.device).to(input.dtype)
# Dequantize the weight
weight = eetq_kernels_hub.w8_a16_gemm(identity, weight, scales)
if ctx.needs_input_grad[0]:
# 2D matrix multiplication, unsqueeze to 3D
grad_input = grad_output.squeeze(0).matmul(weight.transpose(0, 1)).unsqueeze(0)
return grad_input, None, None, None
class EetqLinear(nn.Module):
def __init__(self, in_features, out_features, dtype=torch.int8, bias=False):
super().__init__()
self.weight = nn.Parameter(torch.empty((in_features, out_features), dtype=dtype), requires_grad=False)
self.weight_scales = nn.Parameter(torch.empty((out_features), dtype=torch.float16))
if bias:
self.bias = nn.Parameter(torch.empty((out_features), dtype=torch.float16))
else:
self.bias = None
def forward(self, input):
output = EetqLinearMMFunction.apply(input, self.weight, self.weight_scales, self.bias)
return output
def replace_with_eetq_linear(model, modules_to_not_convert: list[str] | None = None, pre_quantized=False):
"""
A helper function to replace all `torch.nn.Linear` modules by `EetqLinear` modules.
Parameters:
model (`torch.nn.Module`):
Input model or `torch.nn.Module` as the function is run recursively.
modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
Names of the modules to not convert in `EetqLinear`. In practice we keep the `lm_head` in full precision
for numerical stability reasons.
"""
from .hub_kernels import get_kernel
global eetq_kernels_hub
eetq_kernels_hub = get_kernel("kernels-community/quantization-eetq")
has_been_replaced = False
# we need this to correctly materialize the weights during quantization
module_kwargs = {} if pre_quantized else {"dtype": None}
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
with torch.device("meta"):
if isinstance(module, nn.Linear):
new_module = EetqLinear(
module.in_features, module.out_features, bias=module.bias is not None, **module_kwargs
)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using eetq but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
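A minimal sketch of the weight layout `EetqLinear` above expects. Constructing the layer needs no kernels, but its forward pass pulls the `kernels-community/quantization-eetq` hub kernel, so only shapes and dtypes are inspected here; the import path is assumed from the row's file path.

```python
import torch
from transformers.integrations.eetq import EetqLinear

layer = EetqLinear(in_features=64, out_features=128, bias=True)
# The int8 weight is stored transposed, shape (in_features, out_features),
# with one float16 scale per output channel.
print(layer.weight.shape, layer.weight.dtype)                 # torch.Size([64, 128]) torch.int8
print(layer.weight_scales.shape, layer.weight_scales.dtype)   # torch.Size([128]) torch.float16
```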
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/flash_attention.py | src/transformers/integrations/flash_attention.py | import torch
from ..modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ..utils import logging
logger = logging.get_logger(__name__)
_use_top_left_mask = flash_attn_supports_top_left_mask()
def get_target_dtype(query: torch.Tensor, module: torch.nn.Module) -> torch.dtype:
"""If the query is in float32, return a target dtype compatible with flash attention. Return None otherwise."""
if query.dtype == torch.float32:
if torch.is_autocast_enabled():
# NOTE: `torch.get_autocast_dtype` is there starting from PyTorch 2.4
return (
torch.get_autocast_dtype("cuda")
if hasattr(torch, "get_autocast_dtype")
else torch.get_autocast_gpu_dtype()
)
# Handle the case where the model is quantized
elif hasattr(module.config, "quantization_config"):
return module.config.dtype
else:
return next(layer for layer in module.modules() if isinstance(layer, torch.nn.Linear)).weight.dtype
return None
def flash_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
dropout: float = 0.0,
scaling: float | None = None,
sliding_window: int | None = None,
softcap: float | None = None,
is_causal: bool | None = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False):
logger.warning_once(
"`flash_attention_2` does not support `output_attentions=True`."
" Please set your attention to `eager` if you want any of these features."
)
# This is before the transpose
seq_len = query.shape[2]
if any(dim == 0 for dim in query.shape):
raise ValueError(
"Tensor query has shape with a zero dimension.\n"
"FlashAttention does not support inputs with dim=0.\n"
"Please check your input shapes or use SDPA instead."
)
# FA2 uses non-transposed inputs
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
# therefore the input hidden states gets silently casted in float32. Hence, we need
# cast them back in the correct dtype just to be sure everything works as expected.
# This might slowdown training & inference so it is recommended to not cast the LayerNorms
# in fp32. (usually our RMSNorm modules handle it correctly)
target_dtype = get_target_dtype(query, module)
# Instead of relying on the value set in the module directly, we use the is_causal passed in kwargs if it is presented
is_causal = is_causal if is_causal is not None else module.is_causal
attn_output = _flash_attention_forward(
query,
key,
value,
attention_mask,
query_length=seq_len,
is_causal=is_causal,
dropout=dropout,
softmax_scale=scaling,
sliding_window=sliding_window,
softcap=softcap,
use_top_left_mask=_use_top_left_mask,
target_dtype=target_dtype,
attn_implementation=module.config._attn_implementation,
layer_idx=module.layer_idx if hasattr(module, "layer_idx") else None,
**kwargs,
)
return attn_output, None
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
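A minimal sketch of `get_target_dtype` above: with a float32 query, no autocast, and no `quantization_config`, it falls back to the dtype of the first `nn.Linear` it finds in the module. The stub module below is made up for illustration and only carries the attributes the helper inspects.

```python
import torch
import torch.nn as nn
from types import SimpleNamespace
from transformers.integrations.flash_attention import get_target_dtype

class TinyAttention(nn.Module):
    def __init__(self):
        super().__init__()
        self.config = SimpleNamespace()                    # no quantization_config attribute
        self.q_proj = nn.Linear(8, 8).to(torch.bfloat16)   # dtype to cast activations back to

query = torch.randn(1, 2, 4, 8)                            # float32, e.g. after fp32 layer norms
print(get_target_dtype(query, TinyAttention()))            # torch.bfloat16
```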
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/sdpa_attention.py | src/transformers/integrations/sdpa_attention.py | import torch
from ..utils import is_torch_npu_available, is_torch_xpu_available, logging
from ..utils.import_utils import is_torch_greater_or_equal
logger = logging.get_logger(__name__)
_is_torch_greater_or_equal_than_2_5 = is_torch_greater_or_equal("2.5", accept_dev=True)
_is_torch_greater_or_equal_than_2_8 = is_torch_greater_or_equal("2.8", accept_dev=True)
_is_torch_xpu_available = is_torch_xpu_available()
_is_torch_npu_available = is_torch_npu_available()
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
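# --- Illustrative sketch (an editor's addition, not part of the upstream file) ---
# Shape check for `repeat_kv`: 2 key/value heads repeated 3 times line up with 6 query heads.
def _repeat_kv_example():
    key_value = torch.randn(1, 2, 5, 8)      # (batch, num_key_value_heads, seq_len, head_dim)
    expanded = repeat_kv(key_value, n_rep=3)
    assert expanded.shape == (1, 6, 5, 8)    # (batch, num_attention_heads, seq_len, head_dim)
    return expanded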
def use_gqa_in_sdpa(attention_mask: torch.Tensor | None, key: torch.Tensor) -> bool:
# GQA can only be used under the following conditions:
# 1. CUDA or Ascend NPU
#    - torch version >= 2.5
#    - attention_mask is None (otherwise it will fall back to the math kernel)
# 2. XPU
#    - torch version >= 2.8
if _is_torch_xpu_available:
return _is_torch_greater_or_equal_than_2_8
return _is_torch_greater_or_equal_than_2_5 and attention_mask is None
def sdpa_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
dropout: float = 0.0,
scaling: float | None = None,
is_causal: bool | None = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False):
logger.warning_once(
"`sdpa` attention does not support `output_attentions=True`."
" Please set your attention to `eager` if you want any of these features."
)
sdpa_kwargs = {}
if hasattr(module, "num_key_value_groups"):
if not use_gqa_in_sdpa(attention_mask, key):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
else:
sdpa_kwargs = {"enable_gqa": True}
# Instead of relying on the value set in the module directly, we use the is_causal passed in kwargs if it is presented
is_causal = is_causal if is_causal is not None else getattr(module, "is_causal", True)
# SDPA's Flash Attention (and cuDNN) kernels rely on the `is_causal` flag. However, there are certain conditions:
# - Not in decoding phase (otherwise we want full attention on the single query token)
# - Attention mask is not to be provided (even if it is a causal pattern)
# - Internally, we marked this as compatible with causal, i.e. it is a decoder attention type
#
# Quirks on the conditionals:
# - We avoid inline passing this to the SDPA function directly to support both torch.compile's dynamic shapes and
# full graph options. Otherwise, dynamic shapes are prevented from compiling.
# - It is important to check first for the shape, otherwise compile will fail with
# `argument 'is_causal' must be bool, not SymBool`.
is_causal = query.shape[2] > 1 and attention_mask is None and is_causal
# Shapes (e.g. query.shape[2]) are tensors during jit tracing, resulting in `is_causal` being a tensor.
# We convert it to a bool for the SDPA kernel that only accepts bools.
if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
is_causal = is_causal.item()
# When `is_causal = False` and the `attention_mask` is not of boolean type, the Ascend NPU's SDPA interface cannot utilize the FlashAttentionScore operator,
# and falls back to small-operator concatenation. To invoke the FlashAttentionScore, the attention_mask must be converted to boolean type.
# This adaptation ensures the `attention_mask` meets the requirement for using FlashAttentionScore.
if _is_torch_npu_available:
if attention_mask is not None and attention_mask.dtype != torch.bool:
# Convert to boolean type, forcing sdpa to call FlashAttentionScore to improve performance.
attention_mask = torch.logical_not(attention_mask.bool()).to(query.device)
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=attention_mask,
dropout_p=dropout,
scale=scaling,
is_causal=is_causal,
**sdpa_kwargs,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
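# Illustrative call (a sketch, not part of the public API): any attention layer exposing optional
# `num_key_value_groups` / `is_causal` attributes can route through this wrapper, e.g.
#   attn_output, _ = sdpa_attention_forward(self, query, key, value, attention_mask=None, scaling=self.head_dim**-0.5)
# The second return value is always None because SDPA does not expose attention weights.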
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/hub_kernels.py | src/transformers/integrations/hub_kernels.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
import os
import re
from collections.abc import Callable
from types import ModuleType
from packaging import version as pkg_version
from ..utils import ENV_VARS_TRUE_VALUES, logging
from ..utils.import_utils import is_kernels_available
from .flash_attention import flash_attention_forward
logger = logging.get_logger(__name__)
try:
from kernels import (
Device,
LayerRepository,
Mode,
register_kernel_mapping,
replace_kernel_forward_from_hub,
)
from kernels import (
get_kernel as get_kernel_hub,
)
from kernels import (
use_kernel_forward_from_hub as _kernels_use_kernel_forward_from_hub,
)
# Try to import FuncRepository, fallback if not available
try:
from kernels import FuncRepository
except ImportError:
FuncRepository = None
# Try to import use_kernel_func_from_hub, fallback if not available
try:
from kernels import use_kernel_func_from_hub as _kernels_use_kernel_func_from_hub
_has_use_kernel_func_from_hub = True
except ImportError:
_has_use_kernel_func_from_hub = False
_TRANSFORMERS_USE_HUB_KERNELS = os.environ.get("USE_HUB_KERNELS", "YES").upper()
_kernels_available = True
_kernels_enabled = _TRANSFORMERS_USE_HUB_KERNELS in ENV_VARS_TRUE_VALUES
def use_kernel_forward_from_hub(layer_name: str):
if _kernels_enabled:
return _kernels_use_kernel_forward_from_hub(layer_name)
else:
logger.warning_once(
f"kernels hub usage is disabled through the environment USE_HUB_KERNELS={_TRANSFORMERS_USE_HUB_KERNELS}"
)
return lambda cls: cls
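# Illustrative usage (hypothetical layer class): decorating a module class opts its forward into the hub
# kernel registered under the given name, e.g.
#   @use_kernel_forward_from_hub("RMSNorm")
#   class MyRMSNorm(torch.nn.Module):
#       ...
# When hub kernels are enabled, `kernelize` can later swap the forward for the mapped implementation;
# otherwise the class is returned unchanged.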
def use_kernel_func_from_hub(func_name: str):
if _kernels_enabled and _has_use_kernel_func_from_hub:
return _kernels_use_kernel_func_from_hub(func_name)
else:
if not _has_use_kernel_func_from_hub:
logger.warning_once(
"use_kernel_func_from_hub is not available in the installed kernels version. "
"Please upgrade kernels to use this feature."
)
else:
logger.warning_once(
f"kernels hub usage is disabled through the environment USE_HUB_KERNELS={_TRANSFORMERS_USE_HUB_KERNELS}"
)
return lambda func: func
_KERNEL_MAPPING: dict[str, dict[Device | str, LayerRepository | dict[Mode, LayerRepository]]] = {
"MultiScaleDeformableAttention": {
"cuda": LayerRepository(
repo_id="kernels-community/deformable-detr",
layer_name="MultiScaleDeformableAttention",
)
},
"Llama4TextMoe": {
"cuda": LayerRepository(
repo_id="kernels-community/moe",
layer_name="Llama4TextMoe",
)
},
"RMSNorm": {
"cuda": {
Mode.INFERENCE: LayerRepository(
repo_id="kernels-community/liger_kernels",
layer_name="LigerRMSNorm",
# revision="pure-layer-test",
),
},
"rocm": {
Mode.INFERENCE: LayerRepository(
repo_id="kernels-community/liger_kernels",
layer_name="LigerRMSNorm",
)
},
"xpu": {
Mode.INFERENCE: LayerRepository(
repo_id="kernels-community/rmsnorm",
layer_name="RMSNorm",
)
},
"mps": {
Mode.INFERENCE: LayerRepository(
repo_id="kernels-community/mlx_rmsnorm",
layer_name="RMSNorm",
)
},
"npu": {
Mode.INFERENCE: LayerRepository(
repo_id="kernels-community/liger_kernels",
layer_name="LigerRMSNorm",
)
},
},
"MLP": {
"cuda": LayerRepository(
repo_id="medmekk/triton-llama-mlp",
layer_name="TritonLlamaMLP",
)
},
"MegaBlocksMoeMLP": {
"cuda": {
Mode.TRAINING: LayerRepository(
repo_id="kernels-community/megablocks",
layer_name="MegaBlocksMoeMLP",
),
Mode.INFERENCE: LayerRepository(
repo_id="kernels-community/megablocks",
layer_name="MegaBlocksMoeMLP",
),
},
"rocm": {
Mode.INFERENCE: LayerRepository(
repo_id="ahadnagy/megablocks",
layer_name="MegaBlocksMoeMLP",
)
},
},
"FastGELU": {
"cuda": {
Mode.INFERENCE | Mode.TORCH_COMPILE: LayerRepository(
repo_id="kernels-community/activation",
layer_name="FastGELU",
version=">=0.0.4,<0.1.0",
)
}
},
"QuickGELU": {
"cuda": {
Mode.INFERENCE | Mode.TORCH_COMPILE: LayerRepository(
repo_id="kernels-community/activation",
layer_name="QuickGELU",
version=">=0.0.4,<0.1.0",
)
}
},
"NewGELU": {
"cuda": {
Mode.INFERENCE | Mode.TORCH_COMPILE: LayerRepository(
repo_id="kernels-community/activation",
layer_name="NewGELU",
version=">=0.0.4,<0.1.0",
)
}
},
"SiLU": {
"cuda": {
Mode.INFERENCE | Mode.TORCH_COMPILE: LayerRepository(
repo_id="kernels-community/activation", layer_name="Silu", version=">=0.1.0"
)
}
},
"GeLU": {
"cuda": {
Mode.INFERENCE | Mode.TORCH_COMPILE: LayerRepository(
repo_id="kernels-community/activation", layer_name="Gelu", version=">=0.1.0"
)
}
},
"GeluTanh": {
"cuda": {
Mode.INFERENCE | Mode.TORCH_COMPILE: LayerRepository(
repo_id="kernels-community/activation", layer_name="GeluTanh", version=">=0.1.0"
)
}
},
}
# Add function kernel mappings if FuncRepository is available
if FuncRepository is not None:
_KERNEL_MAPPING["rotary_pos_emb"] = {
"xpu": {
Mode.INFERENCE: FuncRepository(
repo_id="kernels-community/rotary", func_name="apply_rotary_transformers"
)
}
}
def has_key(d, key):
return key in d or any(isinstance(v, dict) and has_key(v, key) for v in d.values())
def register_kernel_mapping_transformers(mapping=None):
if mapping is None:
mapping = _KERNEL_MAPPING
if has_key(mapping, "xpu") and not is_kernels_available(MIN_VERSION="0.10.2"):
raise ImportError(
"kernels uses an incompatible version. Please install the latest version with `pip install -U kernels`."
)
register_kernel_mapping(mapping)
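# Illustrative usage (hypothetical repo id): extra mappings can be registered on top of the defaults, e.g.
#   register_kernel_mapping_transformers(
#       {"RMSNorm": {"cuda": LayerRepository(repo_id="my-org/my-rmsnorm", layer_name="RMSNorm")}}
#   )
# Calling it without arguments registers the default `_KERNEL_MAPPING` defined above.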
except ImportError:
_kernels_available = False
_kernels_enabled = False
# Stub to make decorators in transformers work when `kernels`
# is not installed.
def use_kernel_forward_from_hub(*args, **kwargs):
def decorator(cls):
return cls
return decorator
def use_kernel_func_from_hub(*args, **kwargs):
def decorator(func):
return func
return decorator
class LayerRepository:
def __init__(self, *args, **kwargs):
raise RuntimeError("LayerRepository requires `kernels` to be installed. Run `pip install kernels`.")
def replace_kernel_forward_from_hub(*args, **kwargs):
raise RuntimeError(
"replace_kernel_forward_from_hub requires `kernels` to be installed. Run `pip install kernels`."
)
def register_kernel_mapping(*args, **kwargs):
raise RuntimeError("register_kernel_mapping requires `kernels` to be installed. Run `pip install kernels`.")
def register_kernel_mapping_transformers(*args, **kwargs):
raise RuntimeError(
"register_kernel_mapping_transformers requires `kernels` to be installed. Run `pip install kernels`."
)
_HUB_KERNEL_MAPPING: dict[str, dict[str, str]] = {
"causal-conv1d": {"repo_id": "kernels-community/causal-conv1d"},
"mamba-ssm": {"repo_id": "kernels-community/mamba-ssm", "revision": "v0.0.4"},
"falcon_mamba-ssm": {"repo_id": "kernels-community/mamba-ssm", "revision": "v0.0.4"},
}
_KERNEL_MODULE_MAPPING: dict[str, ModuleType | None] = {}
def is_kernel(attn_implementation: str | None) -> bool:
"""Check whether `attn_implementation` matches a kernel pattern from the hub."""
return (
attn_implementation is not None
and re.search(r"^[^/:]+/[^/:]+(?:@[^/:]+)?(?::[^/:]+)?$", attn_implementation) is not None
)
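# Illustrative matches for the pattern above: "kernels-community/flash-mla", "org/repo@main" (revision),
# "org/repo:my_attention" (kernel name) and "org/repo@main:my_attention"; a plain name without a "/"
# (e.g. "sdpa") does not match and is handled as a regular attention implementation.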
def load_and_register_attn_kernel(
attn_implementation: str, attention_wrapper: Callable | None = None
) -> ModuleType | None:
"""
Load and register the kernel associated to `attn_implementation`.
Args:
attn_implementation: A string, usually a kernel repo like "kernels-community/flash-mla".
attention_wrapper: a callable for the wrapper around the attention implementation. In `transformers` we
have a wrapper around the `flash_attn_var_len` call, and the same goes for `sdpa` and `eager`.
They just prepare the arguments properly. This is mostly used for continuous batching, where we
want the `paged` wrapper, which calls the paged cache.
"""
from ..masking_utils import ALL_MASK_ATTENTION_FUNCTIONS
from ..modeling_utils import ALL_ATTENTION_FUNCTIONS
actual_attn_name = attn_implementation.split("|")[1] if "|" in attn_implementation else attn_implementation
if not is_kernel(actual_attn_name):
return None
if not _kernels_available:
raise ImportError(
"`kernels` is either not installed or uses an incompatible version. "
"Please install the latest version with `pip install -U kernels`."
)
# Extract repo_id and kernel_name from the string
if ":" in actual_attn_name:
repo_id, kernel_name = actual_attn_name.split(":")
kernel_name = kernel_name.strip()
else:
repo_id = actual_attn_name
kernel_name = None
repo_id = repo_id.strip()
# extract the rev after the @ if it exists
repo_id, _, rev = repo_id.partition("@")
repo_id = repo_id.strip()
rev = rev.strip() if rev else None
# Load the kernel from hub
try:
kernel = get_kernel(repo_id, revision=rev)
except Exception as e:
raise ValueError(f"An error occurred while trying to load from '{repo_id}': {e}.")
# correctly wrap the kernel
if hasattr(kernel, "flash_attn_varlen_func"):
if attention_wrapper is None:
attention_wrapper = flash_attention_forward
kernel_function = attention_wrapper
elif kernel_name is not None:
kernel_function = getattr(kernel, kernel_name)
# Register the kernel as a valid attention
ALL_ATTENTION_FUNCTIONS.register(attn_implementation, kernel_function)
ALL_MASK_ATTENTION_FUNCTIONS.register(attn_implementation, ALL_MASK_ATTENTION_FUNCTIONS["flash_attention_2"])
return kernel
def lazy_load_kernel(kernel_name: str, mapping: dict[str, ModuleType | None] = _KERNEL_MODULE_MAPPING):
if kernel_name in mapping and isinstance(mapping[kernel_name], ModuleType):
return mapping[kernel_name]
if kernel_name not in _HUB_KERNEL_MAPPING:
logger.warning_once(f"Kernel {kernel_name} not found in _HUB_KERNEL_MAPPING")
mapping[kernel_name] = None
return None
if _kernels_available:
try:
repo_id = _HUB_KERNEL_MAPPING[kernel_name]["repo_id"]
revision = _HUB_KERNEL_MAPPING[kernel_name].get("revision", None)
version = _HUB_KERNEL_MAPPING[kernel_name].get("version", None)
kernel = get_kernel(repo_id, revision=revision, version=version)
mapping[kernel_name] = kernel
except FileNotFoundError:
mapping[kernel_name] = None
except AssertionError:
# Happens when torch is built without an accelerator backend; fall back to slow path.
mapping[kernel_name] = None
else:
# Try to import is_{kernel_name}_available from ..utils
import importlib
new_kernel_name = kernel_name.replace("-", "_")
func_name = f"is_{new_kernel_name}_available"
try:
utils_mod = importlib.import_module("..utils.import_utils", __package__)
is_kernel_available = getattr(utils_mod, func_name, None)
except Exception:
is_kernel_available = None
if callable(is_kernel_available) and is_kernel_available():
# Try to import the module "{kernel_name}" from parent package level
try:
module = importlib.import_module(f"{new_kernel_name}")
mapping[kernel_name] = module
return module
except Exception:
mapping[kernel_name] = None
else:
mapping[kernel_name] = None
return mapping[kernel_name]
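# Illustrative call: `lazy_load_kernel("causal-conv1d")` returns the hub kernel module (cached in
# `_KERNEL_MODULE_MAPPING` for later calls) or None when the kernel cannot be resolved.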
def get_kernel(kernel_name: str, revision: str | None = None, version: str | None = None) -> ModuleType:
from .. import __version__
user_agent = {"framework": "transformers", "version": __version__, "repo_id": kernel_name}
if _kernels_available:
kernels_version = importlib.metadata.version("kernels")
if pkg_version.parse(kernels_version) >= pkg_version.parse("0.10.4"):
return get_kernel_hub(kernel_name, revision=revision, version=version, user_agent=user_agent)
else:
return get_kernel_hub(kernel_name, revision=revision)
else:
raise ImportError("kernels is not installed, please install it with `pip install kernels`")
def use_kernelized_func(module_names: list[Callable] | Callable):
"""
This decorator attaches the target function as an attribute of the module.
The function must already be decorated with @use_kernel_func_from_hub;
that decorator wraps it as an nn.Module internally.
When kernelize is later applied to the full model, the function can be accessed as a regular module attribute and kernelized just like any other layer.
The kernelization is performed in place, modifying the module directly.
"""
if isinstance(module_names, Callable):
module_names = [module_names]
def decorator(cls):
orig_init = cls.__init__
def new_init(self, *args, **kwargs):
orig_init(self, *args, **kwargs)
for fn in module_names:
# we hardcode the name of the function to "rotary_fn" for now
setattr(self, "rotary_fn", fn)
cls.__init__ = new_init
return cls
return decorator
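# Illustrative usage (hypothetical names, a sketch only): a hub-registered rotary function can be attached
# to a module so that `kernelize` can later replace it, e.g.
#   @use_kernel_func_from_hub("rotary_pos_emb")
#   def apply_rotary(q, k, cos, sin):
#       ...
#   @use_kernelized_func(apply_rotary)
#   class MyAttention(nn.Module):
#       ...
# After construction, the function is reachable as `self.rotary_fn` on MyAttention instances.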
__all__ = [
"LayerRepository",
"use_kernel_forward_from_hub",
"use_kernel_func_from_hub",
"register_kernel_mapping",
"register_kernel_mapping_transformers",
"replace_kernel_forward_from_hub",
"lazy_load_kernel",
"get_kernel",
"use_kernelized_func",
] # type: ignore
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/timm.py | src/transformers/integrations/timm.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The sole goal of this integration is to allow re-initialization of the non-persistent buffers of `timm` models.
Indeed, as we load models fully on meta by default, we need a way to restore the correct values of non-persistent buffers.
We assume that everything else, i.e. parameters and persistent buffers, will correctly reside in model checkpoints, so we
won't need to reinit them.
Do not rely on this module, as we will work to integrate it directly in `timm` and then remove this file without warning.
"""
from math import comb
import torch
from .. import initialization as init
from ..utils import is_timm_available
if is_timm_available():
from timm.layers import ndgrid
from timm.layers.blur_pool import BlurPool2d
from timm.layers.lambda_layer import LambdaLayer, rel_pos_indices
from timm.layers.pos_embed_rel import (
RelPosBias,
RelPosBiasTf,
RelPosMlp,
gen_relative_log_coords,
gen_relative_position_index,
generate_lookup_tensor,
)
from timm.layers.pos_embed_sincos import (
FourierEmbed,
RotaryEmbedding,
RotaryEmbeddingCat,
RotaryEmbeddingDinoV3,
RotaryEmbeddingMixed,
freq_bands,
pixel_freq_bands,
)
from timm.models.beit import Attention
from timm.models.beit import gen_relative_position_index as beit_gen_relative_position_index
from timm.models.efficientformer_v2 import Attention2d, Attention2dDownsample
from timm.models.eva import EvaAttention
from timm.models.levit import AttentionDownsample
from timm.models.swin_transformer import SwinTransformerBlock, get_relative_position_index
from timm.models.swin_transformer import WindowAttention as SwinWindowAttention
from timm.models.swin_transformer_v2 import SwinTransformerV2Block
from timm.models.swin_transformer_v2 import WindowAttention as Swin2WindowAttention
from timm.models.swin_transformer_v2_cr import SwinTransformerV2CrBlock, WindowMultiHeadAttention
from timm.models.vision_transformer import ParallelScalingBlock
# This one is very recent and is not necessarily in all versions we support (we require timm>=1.0.20)
try:
from timm.models.csatv2 import _DCT_MEAN, _DCT_VAR, LearnableDct2d
except Exception:
_DCT_MEAN, _DCT_VAR, LearnableDct2d = None, None, type(None)
def _maybe_reinit_non_persistent_buffer(module):
"""Reinit the non-persistent buffers of `module` if it matches any timm Module which has any."""
# This is a loooong list of hardcoded combinations from timm, as the modules do not provide a nice way to do
# it natively
if isinstance(module, FourierEmbed):
init.copy_(module.bands, pixel_freq_bands(module.max_res, module.num_bands))
elif isinstance(module, RotaryEmbedding):
if module.bands is not None:
bands = (
pixel_freq_bands(module.dim // 4, float(module.max_res), linear_bands=module.linear_bands)
if module.in_pixels
else freq_bands(module.dim // 4, temperature=module.temperature, step=1)
)
init.copy_(module.bands, bands)
elif module.pos_embed_sin is not None:
emb_sin, emb_cos = module._get_pos_embed_values(module.feat_shape)
init.copy_(module.pos_embed_sin, emb_sin)
init.copy_(module.pos_embed_cos, emb_cos)
elif isinstance(module, RotaryEmbeddingCat):
if module.bands is not None:
bands = (
pixel_freq_bands(module.dim // 4, float(module.max_res), linear_bands=module.linear_bands)
if module.in_pixels
else freq_bands(module.dim // 4, temperature=module.temperature, step=1)
)
init.copy_(module.bands, bands)
elif module.pos_embed is not None:
init.copy_(module.pos_embed, module._get_pos_embed_values(feat_shape=module.feat_shape))
elif isinstance(module, RotaryEmbeddingMixed):
if module.t_x is not None:
t_x, t_y = module._get_grid_values(module.feat_shape)
init.copy_(module.t_x, t_x)
init.copy_(module.t_y, t_y)
elif isinstance(module, RotaryEmbeddingDinoV3):
init.copy_(module.periods, module._compute_periods())
if module.pos_embed_cached is not None:
init.copy_(module.pos_embed_cached, module._create_embed(module.feat_shape, no_aug=True))
elif isinstance(module, RelPosBias):
has_class_token = module.relative_position_bias_table.shape[0] > (2 * module.window_size[0] - 1) * (
2 * module.window_size[1] - 1
)
init.copy_(
module.relative_position_index,
gen_relative_position_index(module.window_size, class_token=has_class_token).view(-1),
)
elif isinstance(module, RelPosMlp):
init.copy_(module.relative_position_index, gen_relative_position_index(module.window_size).view(-1))
# This one is supposed to pass args `pretrained_window_size` as well to `gen_relative_log_coords`, but it's
# not recorded as class attributes in `__init__` and we have no way to infer its value back as we do for `mode` here...
# Let's hope it's always default value
mode = "cr" if module.bias_gain is None else "swin"
init.copy_(module.rel_coords_log, gen_relative_log_coords(module.window_size, mode=mode))
elif isinstance(module, RelPosBiasTf):
init.copy_(module.height_lookup, generate_lookup_tensor(module.window_size[0]))
init.copy_(module.width_lookup, generate_lookup_tensor(module.window_size[1]))
elif isinstance(module, LearnableDct2d):
init.copy_(module.mean, torch.tensor(_DCT_MEAN))
init.copy_(module.var, torch.tensor(_DCT_VAR))
init.copy_(module.imagenet_mean, torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1))
init.copy_(module.imagenet_std, torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1))
elif isinstance(module, LambdaLayer):
if module.rel_pos_indices is not None:
rel_size = module.pos_emb.shape[:2]
feat_size = [(s + 1) // 2 for s in rel_size]
init.copy_(module.rel_pos_indices, rel_pos_indices(feat_size))
elif isinstance(module, AttentionDownsample):
k_pos = torch.stack(
ndgrid(
torch.arange(module.resolution[0], dtype=torch.long),
torch.arange(module.resolution[1], dtype=torch.long),
)
).flatten(1)
q_pos = torch.stack(
ndgrid(
torch.arange(0, module.resolution[0], step=module.stride, dtype=torch.long),
torch.arange(0, module.resolution[1], step=module.stride, dtype=torch.long),
)
).flatten(1)
rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * module.resolution[1]) + rel_pos[1]
init.copy_(module.attention_bias_idxs, rel_pos)
elif isinstance(
module,
EvaAttention,
):
if module.k_bias is not None:
init.zeros_(module.k_bias)
elif isinstance(module, ParallelScalingBlock):
if module.qkv_bias is not None:
init.zeros_(module.qkv_bias)
elif isinstance(module, Attention):
if module.k_bias is not None:
init.zeros_(module.k_bias)
if module.relative_position_index is not None:
init.copy_(module.relative_position_index, beit_gen_relative_position_index(module.window_size))
elif isinstance(module, SwinTransformerV2CrBlock):
if module.attn_mask is not None:
init.copy_(module.attn_mask, module.get_attn_mask())
elif isinstance(module, WindowMultiHeadAttention):
module._make_pair_wise_relative_positions()
elif isinstance(module, BlurPool2d):
coeffs = torch.tensor(
[comb(module.filt_size - 1, k) for k in range(module.filt_size)], dtype=torch.float32
) / (2 ** (module.filt_size - 1))
blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :]
if module.channels is not None:
blur_filter = blur_filter.repeat(module.channels, 1, 1, 1)
init.copy_(module.filt, blur_filter)
elif isinstance(module, Swin2WindowAttention):
module._make_pair_wise_relative_positions()
if module.k_bias is not None:
init.zeros_(module.k_bias)
elif isinstance(module, SwinTransformerV2Block):
if module.attn_mask is not None:
init.copy_(module.attn_mask, module.get_attn_mask())
elif isinstance(module, SwinWindowAttention):
init.copy_(module.relative_position_index, get_relative_position_index(*module.window_size))
elif isinstance(module, SwinTransformerBlock):
if module.attn_mask is not None:
init.copy_(module.attn_mask, module.get_attn_mask())
elif isinstance(module, Attention2d):
pos = torch.stack(
ndgrid(
torch.arange(module.resolution[0], dtype=torch.long),
torch.arange(module.resolution[1], dtype=torch.long),
)
).flatten(1)
rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * module.resolution[1]) + rel_pos[1]
init.copy_(module.attention_bias_idxs, rel_pos)
elif isinstance(module, Attention2dDownsample):
k_pos = torch.stack(
ndgrid(
torch.arange(module.resolution[0], dtype=torch.long),
torch.arange(module.resolution[1], dtype=torch.long),
)
).flatten(1)
q_pos = torch.stack(
ndgrid(
torch.arange(0, module.resolution[0], step=2, dtype=torch.long),
torch.arange(0, module.resolution[1], step=2, dtype=torch.long),
)
).flatten(1)
rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * module.resolution[1]) + rel_pos[1]
init.copy_(module.attention_bias_idxs, rel_pos)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/awq.py | src/transformers/integrations/awq.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"AWQ (Activation aware Weight Quantization) integration file"
from typing import Optional, Union
from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
logger = logging.get_logger(__name__)
AWQ_SCALES_MAPPINGS = {
"starcoder2": {"act": "act", "layer_before_act": "c_fc"},
"RefinedWebModel": {"act": "act", "layer_before_act": "dense_h_to_4h"},
"falcon": {"act": "act", "layer_before_act": "dense_h_to_4h"},
"mpt": {"act": "act", "layer_before_act": "up_proj"},
"gptj": {"act": "act", "layer_before_act": "fc_in"},
"gpt_neox": {"act": "act", "layer_before_act": "dense_h_to_4h"},
"gpt_bigcode": {"act": "act", "layer_before_act": "c_fc"},
"bloom": {"act": "gelu_impl", "layer_before_act": "dense_h_to_4h"},
}
def replace_quantization_scales(model, model_type):
from gptqmodel.quantization.awq.modules.act import ScaledActivation
if model_type not in AWQ_SCALES_MAPPINGS:
return model
for name, module in model.named_children():
act_name = AWQ_SCALES_MAPPINGS[model_type]["act"]
layer_before_act_name = AWQ_SCALES_MAPPINGS[model_type]["layer_before_act"]
if name == act_name and hasattr(model, layer_before_act_name):
layer_before_act = getattr(model, AWQ_SCALES_MAPPINGS[model_type]["layer_before_act"])
size = layer_before_act.out_features
scale_like = torch.ones(size)
model._modules[name] = ScaledActivation(module, scale_like)
_ = replace_quantization_scales(module, model_type)
return model
def replace_with_awq_linear(
model,
modules_to_not_convert=None,
quantization_config=None,
device_map: Optional[Union[str, dict]] = None,
) -> bool:
"""
Public method that replaces the linear layers of the given model with awq quantized layers.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`AwqConfig`):
The quantization config object that contains the quantization parameters.
modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
device_map (`Union[str, dict]`, *optional*, defaults to `None`):
The device map that maps the parameters to the device
"""
from gptqmodel.quantization import METHOD
from gptqmodel.utils.importer import hf_select_quant_linear_v2
target_cls = hf_select_quant_linear_v2(
bits=quantization_config.bits,
group_size=quantization_config.group_size,
desc_act=False,
sym=False,
format=quantization_config.format,
backend=quantization_config.backend,
device_map=device_map,
quant_method=METHOD.AWQ,
zero_point=quantization_config.zero_point,
pack=False,
)
has_been_replaced = False
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
with torch.device("meta"):
if isinstance(module, nn.Linear):
new_module = target_cls(
bits=quantization_config.bits,
sym=quantization_config.sym,
desc_act=quantization_config.desc_act,
group_size=quantization_config.group_size,
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
dev=module.weight.device,
register_buffers=True,
)
new_module.requires_grad_(False)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using eetq but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/torchao.py | src/transformers/integrations/torchao.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.metadata
import re
import types
from typing import Optional
import torch
from packaging import version
from transformers.utils import logging
from transformers.utils.import_utils import is_torch_available, is_torchao_available
if is_torch_available():
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name
if is_torchao_available():
TORCHAO_VERSION = version.parse(importlib.metadata.version("torchao"))
if version.parse(importlib.metadata.version("torchao")) >= version.parse("0.15.0"):
from torchao.prototype.safetensors.safetensors_support import (
unflatten_tensor_state_dict,
)
from torchao.prototype.safetensors.safetensors_utils import is_metadata_torchao
logger = logging.get_logger(__name__)
def fuzzy_match_size(config_name: str) -> Optional[str]:
"""
Extract the size digit from strings like "4weight", "8weight".
Returns the digit as a string if found, otherwise None.
"""
config_name = config_name.lower()
str_match = re.search(r"(\d)weight", config_name)
if str_match:
return str_match.group(1)
return None
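# Illustrative calls: fuzzy_match_size("Int4WeightOnlyConfig") -> "4",
# fuzzy_match_size("Float8WeightOnlyConfig") -> "8", fuzzy_match_size("gemlite") -> None.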
def _quantization_type(weight):
from torchao.dtypes import AffineQuantizedTensor
from torchao.quantization.linear_activation_quantized_tensor import LinearActivationQuantizedTensor
if isinstance(weight, AffineQuantizedTensor):
return f"{weight.__class__.__name__}({weight._quantization_type()})"
if isinstance(weight, LinearActivationQuantizedTensor):
return f"{weight.__class__.__name__}(activation={weight.input_quant_func}, weight={_quantization_type(weight.original_weight_tensor)})"
def _linear_extra_repr(self):
weight = _quantization_type(self.weight)
if weight is None:
return f"in_features={self.weight.shape[1]}, out_features={self.weight.shape[0]}, weight=None"
else:
return f"in_features={self.weight.shape[1]}, out_features={self.weight.shape[0]}, weight={weight}"
class TorchAoQuantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: Optional[torch.nn.Module] = None,
full_layer_name: str | None = None,
missing_keys=None,
**kwargs,
) -> dict[str, torch.Tensor]:
from torchao.quantization import quantize_
_, value = tuple(input_dict.items())[0]
value = value[0] if isinstance(value, list) else value
module, tensor_name = get_module_from_name(model, full_layer_name)
module._parameters[tensor_name] = torch.nn.Parameter(value, requires_grad=value.requires_grad)
# if we are quantizing tied parameters, to avoid tying the quantized weights
# the correct order to do it is
# 1. load the weight to model
# 2. run tie_weights to populate the weights
# 3. quantize
input_embed = model.get_input_embeddings()
is_embedding_param = id(module) == id(input_embed)
untie_embedding_weights = self.hf_quantizer.quantization_config.untie_embedding_weights
if untie_embedding_weights and is_embedding_param:
setattr(model.config.get_text_config(decoder=True), "tie_word_embeddings", False)
# handle FqnToConfig, introduced in torchao 0.15.0+
if self.hf_quantizer.quantization_config._get_ao_version() >= version.Version("0.15.0"):
from torchao.quantization import FqnToConfig
config = self.hf_quantizer.quantization_config.get_apply_tensor_subclass()
if isinstance(config, FqnToConfig):
module_fqn, top_level_param_name = full_layer_name.rsplit(".", 1)
c = None
if full_layer_name in config.fqn_to_config:
assert not module_fqn.startswith("re:"), (
"param fqn should not start with`re:`, which is used for specifying regex"
)
c = config.module_fqn_to_config[full_layer_name]
elif module_fqn in config.fqn_to_config:
assert not module_fqn.startswith("re:"), (
"module fqn should not start with`re:`, which is used for specifying regex"
)
c = config.module_fqn_to_config[module_fqn]
# regex match module and param
else:
for maybe_module_fqn_pattern in config.fqn_to_config:
# if key doesn't start with re, it is an exact fqn key, so we don't regex match
if not maybe_module_fqn_pattern.startswith("re:"):
continue
# see if param matches first
elif re.fullmatch(maybe_module_fqn_pattern[3:], full_layer_name):
c = config.module_fqn_to_config[maybe_module_fqn_pattern]
break
elif re.fullmatch(maybe_module_fqn_pattern[3:], module_fqn):
# we'll apply the config for first fully matched pattern
c = config.module_fqn_to_config[maybe_module_fqn_pattern]
break
else:
c = config.module_fqn_to_config.get("_default", None)
if c is not None:
if top_level_param_name == "weight":
if is_embedding_param and untie_embedding_weights:
lm_head = module.weight.clone()
# we can apply the module config directly
quantize_(module, c, (lambda x, fqn: True))
missing_keys.discard(full_layer_name)
module._is_hf_initialized = True
return {"lm_head.weight": lm_head} if is_embedding_param and untie_embedding_weights else {}
else:
# need to apply to custom param name
custom_param_fqn_config = FqnToConfig({top_level_param_name: c})
quantize_(module, custom_param_fqn_config, filter_fn=None)
missing_keys.discard(full_layer_name)
module._is_hf_initialized = True
return {}
return {full_layer_name: value}
# handle ModuleFqnToConfig, introduced in torchao 0.12.0+
# TODO deprecate this when we deprecate ModuleFqnToConfig
elif self.hf_quantizer.quantization_config._get_ao_version() >= version.Version("0.12.0"):
from torchao.quantization import ModuleFqnToConfig
config = self.hf_quantizer.quantization_config.get_apply_tensor_subclass()
if isinstance(config, ModuleFqnToConfig):
module_fqn, _ = full_layer_name.rsplit(".", 1)
c = None
if module_fqn in config.module_fqn_to_config:
assert not module_fqn.startswith("re:"), (
"module fqn should not start with`re:`, which is used for specifying regex"
)
c = config.module_fqn_to_config[module_fqn]
else:
for maybe_module_fqn_pattern in config.module_fqn_to_config:
if not maybe_module_fqn_pattern.startswith("re:"):
continue
elif re.fullmatch(maybe_module_fqn_pattern[3:], module_fqn):
# we'll apply the config for first fully matched pattern
c = config.module_fqn_to_config[maybe_module_fqn_pattern]
break
else:
c = config.module_fqn_to_config.get("_default", None)
if c is not None:
# filter_fn: not filtering out any modules
if is_embedding_param and untie_embedding_weights:
lm_head = module.weight.clone()
quantize_(module, c, filter_fn=lambda x, fqn: True)
missing_keys.discard(full_layer_name)
module._is_hf_initialized = True
return {"lm_head.weight": lm_head} if is_embedding_param and untie_embedding_weights else {}
return {full_layer_name: value}
if is_embedding_param and untie_embedding_weights:
lm_head = module.weight.clone()
quantize_(module, self.hf_quantizer.quantization_config.get_apply_tensor_subclass())
missing_keys.discard(full_layer_name)
module._is_hf_initialized = True
return {"lm_head.weight": lm_head} if is_embedding_param and untie_embedding_weights else {}
class TorchAoDeserialize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
source_patterns: list[str] | None = None,
model: Optional[torch.nn.Module] = None,
full_layer_name: str | None = None,
missing_keys=None,
**kwargs,
) -> dict[str, torch.Tensor]:
"""
Consolidates tensor subclass components before reconstructing the object
For example:
input_dict: {
"_weight_qdata": torch.Tensor,
"_weight_scale": torch.Tensor,
}
full_layer_name: "model.layers.0.self_attn.k_proj.weight"
Given this, we reconstruct a Float8Tensor instance using the qdata and scale
and return it as a dictionary with the full_layer_name as the key and the recovered
Float8Tensor instance as the value.
"""
is_unsafe_serialization = list(input_dict.keys())[0] not in source_patterns
param_data = {}
layer_name = ".".join(full_layer_name.split(".")[:-1])
if is_unsafe_serialization:
if isinstance(input_dict["weight"], list):
weight = input_dict["weight"][0]
else:
weight = input_dict["weight"]
else:
for suffix in input_dict.keys():
if len(input_dict[suffix]) != 1:
raise ValueError(
f"Expected a single tensor for {suffix} but got {len(input_dict[suffix])} tensors instead"
)
param_data[f"{layer_name}.{suffix}"] = input_dict[suffix][0]
# If it's unsafe-serialized (i.e. not safetensors), no need for anything
if is_unsafe_serialization:
return {full_layer_name: weight}
# Sanity check for the new serialization format
elif not (TORCHAO_VERSION >= version.parse("0.15.0") and is_metadata_torchao(self.hf_quantizer.metadata)):
raise ValueError("To use `safetensors` serialization, you should have `torchao>=0.15.0` installed")
unflattened_state_dict, leftover_state_dict = unflatten_tensor_state_dict(
param_data, self.hf_quantizer.metadata
)
assert not leftover_state_dict # there should be no unprocessed tensors
new_param = unflattened_state_dict[full_layer_name]
module, _ = get_module_from_name(model, full_layer_name)
# Add repr to the module
if isinstance(module, torch.nn.Linear):
module.extra_repr = types.MethodType(_linear_extra_repr, module)
return {full_layer_name: new_param}
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/vptq.py | src/transformers/integrations/vptq.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"VPTQ (Vector Post-Training Quantization) integration file"
from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
logger = logging.get_logger(__name__)
def replace_with_vptq_linear(model, modules_to_not_convert: list[str] | None = None, quantization_config=None):
"""
Public method that replaces the Linear layers of the given model with VPTQ quantized layers.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
quantization_config (`VptqConfig`):
The quantization config object that contains the quantization parameters.
"""
from vptq import VQuantLinear
has_been_replaced = False
shared_layer_config = quantization_config.shared_layer_config
config_for_layers = quantization_config.config_for_layers
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
with torch.device("meta"):
if isinstance(module, nn.Linear):
layer_params = config_for_layers.get(module_name, None) or shared_layer_config.get(
module_name.rsplit(".", 1)[-1], None
)
new_module = VQuantLinear(
module.in_features,
module.out_features,
vector_lens=layer_params["vector_lens"],
num_centroids=layer_params["num_centroids"],
num_res_centroids=layer_params["num_res_centroids"],
group_num=layer_params["group_num"],
group_size=layer_params["group_size"],
outlier_size=layer_params["outlier_size"],
indices_as_float=layer_params["indices_as_float"],
enable_norm=layer_params["enable_norm"],
enable_perm=layer_params["enable_perm"],
is_indice_packed=True,
enable_proxy_error=False,
bias=module.bias is not None,
)
# Force requires grad to False to avoid unexpected errors
new_module.requires_grad_(False)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using eetq but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/quark.py | src/transformers/integrations/quark.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from ..core_model_loading import ConversionOps
from ..utils import is_torch_available
if is_torch_available():
import torch
class QuarkDeserialize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: torch.Tensor,
model: Optional[torch.nn.Module] = None,
missing_keys: Optional[list[str]] = None,
full_layer_name: str | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
# target_key should be in the form of weight_scale, bias_scale, input_scale, output_scale, weight_zero_point, bias_zero_point, input_zero_point, output_zero_point
target_key, value = tuple(input_dict.items())[0]
value = value[0] if isinstance(value, list) else value
# this will get the param name : weight, input, bias or output
param = target_key.split("_", 1)[0]
# quant_state should be in the form of scale, or zero_point
quant_state = target_key.split("_", 1)[-1]
# here we change the name for example from the form of :
# model.layers.0.mlp.down_proj.weight_scale to model.layers.0.mlp.down_proj.weight_quantizer.scale to fit within
# the QParamsLinear module of quark
sub_module_state = full_layer_name.rsplit(".", 1)[0] + "." + param + "_quantizer" + "." + quant_state
# since quark module was expecting keys in the form of model.layers.0.mlp.down_proj.weight_scale
# we need to remove it from the missing_keys list
missing_keys.discard(full_layer_name)
return {sub_module_state: value}
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/eager_paged.py | src/transformers/integrations/eager_paged.py | import torch
from torch import nn
from ..generation.continuous_batching.cache import PagedAttentionCache
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_paged_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None, # shape [seqlen_q, seqlen_k]
scaling: float,
**kwargs,
):
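# Eager (matmul + softmax) attention over a paged KV cache: new key/value states are written into the
# cache pages and read back over the full read window, optionally repeated for grouped-query attention,
# masked, and combined with optional attention sinks before the weighted sum with the values.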
# Add KV cache to the key and value tensors
cache: PagedAttentionCache | None = kwargs.pop("cache", None)
if cache is not None:
# This changes the shape of k and v from [1, num_kv_heads, seqlen_kv, head_dim] to [-1, num_kv_heads, head_dim]
key, value = cache.update(
key_states=key,
value_states=value,
layer_idx=module.layer_idx,
read_index=kwargs["read_index"],
write_index=kwargs["write_index"],
)
key = key.transpose(0, 1).unsqueeze(0)
value = value.transpose(0, 1).unsqueeze(0)
# Repeat the key and value tensors for each group of key-value heads
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
# Get the right causal mask for the current layer
if isinstance(attention_mask, dict):
sliding_window = getattr(module, "sliding_window", 1)
layer_type = "full_attention" if sliding_window == 1 or sliding_window is None else "sliding_attention"
causal_mask = attention_mask[layer_type]
else:
causal_mask = attention_mask
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if causal_mask is not None:
attn_weights = attn_weights + causal_mask
# Handle attention sinks if the model has them
if hasattr(module, "sinks"):
# Retrieve the sink and add it to the attention weights
sinks = module.sinks.reshape(1, -1, 1, 1).expand(query.shape[0], -1, query.shape[-2], -1)
attn_weights = torch.cat([attn_weights, sinks], dim=-1)
# Normalize the attention weights for better numerical stability
attn_weights = attn_weights - attn_weights.max(dim=-1, keepdim=True).values
# Apply softmax and drop the sink. Not exactly the same code as eager w/ sink, but the same code does not produce the same results.
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = attn_weights[..., :-1]
else:
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/fp_quant.py | src/transformers/integrations/fp_quant.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"FP-Quant integration file"
from typing import Optional
import torch
from ..utils import (
is_fp_quant_available,
)
if is_fp_quant_available():
from fp_quant import FPQuantConfig as FPQuantLinearConfig
from fp_quant import FPQuantDtype
from transformers.utils.quantization_config import FPQuantConfig
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name
class FpQuantQuantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: torch.Tensor,
model: Optional[torch.nn.Module] = None,
missing_keys: Optional[list[str]] = None,
**kwargs,
) -> dict[str, torch.Tensor]:
target_key, value = tuple(input_dict.items())[0]
value = value[0]
# Loading master weights or an unquantized checkpoint
weight = torch.nn.Parameter(value)
module, _ = get_module_from_name(model, target_key)
module.weight = weight
# Let pre-forward handle the quantization and set None where necessary
# This operation will quantize the weights internally
with torch.cuda.device(value.device):
module.pre_forward()
prefix_target_key = target_key.rsplit(".", 1)[0]
# keys are set inside the module.pre_forward() method, we don't need remove them from the missing keys list
missing_keys.discard(target_key)
missing_keys.discard(f"{prefix_target_key}.backward_hadamard_matrix")
missing_keys.discard(f"{prefix_target_key}.forward_hadamard_matrix")
missing_keys.discard(f"{prefix_target_key}.act_global_scale")
missing_keys.discard(f"{prefix_target_key}.weight_global_scale")
missing_keys.discard(f"{prefix_target_key}.qweight")
missing_keys.discard(f"{prefix_target_key}.scales")
missing_keys.discard(f"{prefix_target_key}.dqweight")
return {}
class FpQuantDeserialize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: torch.Tensor,
model: Optional[torch.nn.Module] = None,
full_layer_name: str | None = None,
missing_keys: Optional[list[str]] = None,
**kwargs,
) -> dict[str, torch.Tensor]:
target_key, value = tuple(input_dict.items())[0]
value = value[0] if isinstance(value, list) else value
module, _ = get_module_from_name(model, target_key)
# The module holds either:
# * `weight` when `store_master_weights=True`
# * `qweight` and `scales` when `store_master_weights=False` and `pseudoquantization=False`
# * `dqweight` when `store_master_weights=False` and `pseudoquantization=True`
if target_key == ".qweight":
# Loading a real quantized checkpoint without master weights
qweight = torch.nn.Parameter(
value,
requires_grad=False,
)
return {
".qweight": qweight,
# the way the FPQuantLinear module is designed, these parameters are expected in the model
# even though they are not used so we need to set them to zeros
".weight": torch.nn.Parameter(torch.zeros(0)),
".dqweight": torch.nn.Parameter(torch.zeros(0)),
}
if target_key == ".dqweight":
# Loading a pseudo-quantized checkpoint without master weights
dqweight = torch.nn.Parameter(value)
return {
".dqweight": dqweight,
# the way the FPQuantLinear module is designed, these parameters are expected in the model
# even though they are not used so we need to set them to zeros
".weight": torch.nn.Parameter(torch.zeros(0)),
".qweight": torch.nn.Parameter(torch.zeros(0)),
".scales": torch.nn.Parameter(torch.zeros(0)),
}
def adapt_fp_quant_config(config: FPQuantConfig):
if config.forward_dtype == "mxfp4":
forward_dtype = FPQuantDtype.MXFP4
elif config.forward_dtype == "nvfp4":
forward_dtype = FPQuantDtype.NVFP4
else:
raise ValueError(f"Unsupported forward dtype: {config.forward_dtype}")
if config.backward_dtype == "bf16":
backward_dtype = FPQuantDtype.BF16
elif config.backward_dtype == "mxfp8":
backward_dtype = FPQuantDtype.MXFP8
elif config.backward_dtype == "mxfp4":
backward_dtype = FPQuantDtype.MXFP4
else:
raise ValueError(f"Unsupported backward dtype: {config.backward_dtype}")
return FPQuantLinearConfig(
forward_dtype=forward_dtype,
forward_method=config.forward_method,
backward_dtype=backward_dtype,
store_master_weights=config.store_master_weights,
hadamard_group_size=config.hadamard_group_size,
pseudoquantization=config.pseudoquantization,
transform_init=config.transform_init,
modules_to_not_convert=config.modules_to_not_convert,
)
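# Illustrative call (a sketch, assuming `fp_quant` is installed): translating the transformers-side config
# into the fp_quant linear config, e.g.
#   linear_cfg = adapt_fp_quant_config(FPQuantConfig(forward_dtype="mxfp4", backward_dtype="bf16"))
# Unsupported dtype strings raise a ValueError, as shown above.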
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/bitsandbytes.py | src/transformers/integrations/bitsandbytes.py | import inspect
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name, should_convert_module
from ..utils import (
get_available_devices,
is_accelerate_available,
is_bitsandbytes_available,
is_torch_available,
logging,
)
if is_bitsandbytes_available():
import bitsandbytes as bnb
if is_torch_available():
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
import accelerate
from accelerate.hooks import add_hook_to_module, remove_hook_from_module
logger = logging.get_logger(__name__)
class Bnb4bitQuantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, list[torch.Tensor]],
full_layer_name: str | None = None,
model: torch.nn.Module | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
"""
We need to store some parameters to create the quantized weight. For example, bnb requires 6 values that are stored in the checkpoint to recover the quantized weight. So we store them in a dict that is kept on the hf_quantizer for now, as we can't save it in the op since we create an op per tensor.
"""
value = list(input_dict.values())[0]
value = value[0]
# update param name to get the weights instead of the quantized stats
module, _ = get_module_from_name(model, full_layer_name)
# Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, Conv1D):
value = value.T
old_value = model.get_parameter_or_buffer(full_layer_name)
new_value = bnb.nn.Params4bit(value, requires_grad=False, **old_value.__dict__).to(value.device)
module._is_hf_initialized = True
return {full_layer_name: new_value}
class Bnb4bitDeserialize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, list[torch.Tensor]],
model: torch.nn.Module | None = None,
full_layer_name: str | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
"""
Deserialization of bnb keys. We need 6 keys to recreate the quantized weights
"""
if len(input_dict) == 1:
return input_dict
for key, value in input_dict.items():
if isinstance(value, list):
input_dict[key] = value[0]
key_weight = "weight"
weight = input_dict.pop(key_weight)
module, _ = get_module_from_name(model, full_layer_name)
new_value = bnb.nn.Params4bit.from_prequantized(
data=weight,
quantized_stats=input_dict,
requires_grad=False,
device=weight.device,
module=module,
)
module._is_hf_initialized = True
return {key_weight: new_value}
class Bnb8bitQuantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, list[torch.Tensor]],
model: torch.nn.Module | None = None,
full_layer_name: str | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
value = list(input_dict.values())[0]
value = value[0] if isinstance(value, list) else value
module, _ = get_module_from_name(model, full_layer_name)
# Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, Conv1D):
value = value.T
value_device = value.device
kwargs = model.get_parameter_or_buffer(full_layer_name).__dict__
kwargs.pop("SCB", None)
new_value = bnb.nn.Int8Params(value.to("cpu"), requires_grad=False, **kwargs).to(value_device)
return {full_layer_name: new_value}
class Bnb8bitDeserialize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, list[torch.Tensor]],
model: torch.nn.Module | None = None,
full_layer_name: str | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
"""
Deserialization of bnb keys.
"""
if len(input_dict) == 1:
# special case when we only fetched the weight
# since we collected keys, we need to return it like that
return input_dict
for key, value in input_dict.items():
if isinstance(value, list):
input_dict[key] = value[0]
module, _ = get_module_from_name(model, full_layer_name)
key_weight = "weight"
weight = input_dict[key_weight]
kwargs = model.get_parameter_or_buffer(full_layer_name).__dict__
kwargs["SCB"] = input_dict["SCB"]
new_value = bnb.nn.Int8Params(weight, requires_grad=False, **kwargs).to(weight.device)
module._is_hf_initialized = True
return {key_weight: new_value}
def replace_with_bnb_linear(
model: torch.nn.Module,
modules_to_not_convert: list[str] | None = None,
quantization_config=None,
pre_quantized=False,
):
"""
A helper function to replace all `torch.nn.Linear` modules by bnb modules from the `bitsandbytes` library.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
modules_to_not_convert (`list[str]`, defaults to `None`):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
quantization_config (`BitsAndBytesConfig`):
The quantization config object that contains the quantization parameters.
pre_quantized (`bool`, defaults to `False`):
Whether the model is pre-quantized or not.
"""
has_been_replaced = False
# we need this to correctly materialize the weights during quantization
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
new_module = None
with torch.device("meta"):
if isinstance(module, (nn.Linear, Conv1D)):
if isinstance(module, Conv1D):
in_features, out_features = module.weight.shape
else:
in_features = module.in_features
out_features = module.out_features
if quantization_config.quantization_method() == "llm_int8":
new_module = bnb.nn.Linear8bitLt(
in_features,
out_features,
module.bias is not None,
has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
threshold=quantization_config.llm_int8_threshold,
)
if pre_quantized:
# this is kind of an edge case when supporting both loading and quantization ...
# we need to set the right dtype as we cast the checkpoint with the dtype of the meta model
new_module.weight.data = new_module.weight.data.to(dtype=torch.int8)
else:
new_module = bnb.nn.Linear4bit(
in_features,
out_features,
module.bias is not None,
quantization_config.bnb_4bit_compute_dtype,
compress_statistics=quantization_config.bnb_4bit_use_double_quant,
quant_type=quantization_config.bnb_4bit_quant_type,
quant_storage=quantization_config.bnb_4bit_quant_storage,
)
if pre_quantized:
# same here
new_module.weight.data = new_module.weight.data.to(
dtype=quantization_config.bnb_4bit_quant_storage
)
if new_module is not None:
# Store the module class in case we need to transpose the weight later
new_module.source_cls = type(module)
# Force requires grad to False to avoid unexpected errors
new_module.requires_grad_(False)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using eetq but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
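# --- Illustrative sketch, not part of the library API ---
# A minimal, hedged example of calling `replace_with_bnb_linear` directly on a toy model,
# assuming `bitsandbytes` is installed. In practice this helper is normally driven by
# `from_pretrained`; the toy model and config below are made up, and the replaced modules
# stay on the meta device until real (quantized) weights are loaded into them.
def _example_replace_with_bnb_linear():
    import torch.nn as nn

    from transformers import BitsAndBytesConfig

    toy_model = nn.Sequential(nn.Linear(16, 32), nn.Linear(32, 4))
    config = BitsAndBytesConfig(load_in_8bit=True)
    return replace_with_bnb_linear(toy_model, modules_to_not_convert=[], quantization_config=config)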
# Copied from PEFT: https://github.com/huggingface/peft/blob/47b3712898539569c02ec5b3ed4a6c36811331a1/src/peft/utils/integrations.py#L41
def dequantize_bnb_weight(weight: "torch.nn.Parameter", state=None):
"""
Helper function to dequantize 4bit or 8bit bnb weights.
If the weight is not a bnb quantized weight, it will be returned as is.
"""
if not isinstance(weight, torch.nn.Parameter):
raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead")
cls_name = weight.__class__.__name__
if cls_name not in ("Params4bit", "Int8Params"):
return weight
if cls_name == "Params4bit":
output_tensor = bnb.functional.dequantize_4bit(weight.data, weight.quant_state)
return output_tensor
if state.SCB is None:
state.SCB = weight.SCB
if hasattr(bnb.functional, "int8_vectorwise_dequant"):
# Use bitsandbytes API if available (requires v0.45.0+)
dequantized = bnb.functional.int8_vectorwise_dequant(weight.data, state.SCB)
else:
# Multiply by (scale/127) to dequantize.
dequantized = weight.data * state.SCB.view(-1, 1) * 7.874015718698502e-3
return dequantized
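# --- Illustrative sketch, not part of the library API ---
# Minimal reproduction of the int8 fallback arithmetic above with plain torch, so it can be
# checked without bitsandbytes: each row is rescaled by its absmax statistic SCB / 127.
# The tensor values are made up.
def _example_int8_dequant():
    import torch

    int8_weight = torch.tensor([[127, -64], [32, 0]], dtype=torch.int8)
    scb = torch.tensor([0.5, 2.0])  # one scale per output row (the `SCB` statistic)
    dequantized = int8_weight.float() * scb.view(-1, 1) / 127
    # dequantized ~= [[0.5000, -0.2520], [0.5039, 0.0000]]
    return dequantized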
def _create_accelerate_new_hook(old_hook):
r"""
Creates a new hook based on the old hook. Use it only if you know what you are doing!
This method is a copy of: https://github.com/huggingface/peft/blob/748f7968f3a31ec06a1c2b0328993319ad9a150a/src/peft/utils/other.py#L245
with some changes
"""
old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__)
old_hook_attr = old_hook.__dict__
filtered_old_hook_attr = {}
old_hook_init_signature = inspect.signature(old_hook_cls.__init__)
for k in old_hook_attr:
if k in old_hook_init_signature.parameters:
filtered_old_hook_attr[k] = old_hook_attr[k]
new_hook = old_hook_cls(**filtered_old_hook_attr)
return new_hook
def dequantize_and_replace(model, quantization_config=None, dtype=None):
"""
Converts a quantized model into its dequantized original version. The newly converted model will have
some performance drop compared to the original model before quantization - use it only for specific usecases
such as QLoRA adapters merging.
Returns the converted model.
"""
quant_method = quantization_config.quantization_method()
target_cls = bnb.nn.Linear8bitLt if quant_method == "llm_int8" else bnb.nn.Linear4bit
has_been_replaced = False
for module_name, module in model.named_modules():
if isinstance(module, target_cls):
with torch.device("meta"):
bias = getattr(module, "bias", None)
new_module = torch.nn.Linear(module.in_features, module.out_features, bias=bias is not None)
state = module.state if quant_method == "llm_int8" else None
weight = dequantize_bnb_weight(module.weight, state)
if dtype is None:
logger.warning_once(
f"The modules are dequantized in {weight.dtype}. If you want to change the dtype, please specify `dtype` in `dequantize`. "
)
else:
logger.warning_once(f"The modules are dequantized in {weight.dtype} and casted to {dtype}.")
weight = weight.to(dtype)
new_module.weight = torch.nn.Parameter(weight)
if bias is not None:
new_module.bias = bias
if hasattr(module, "_hf_hook"):
old_hook = module._hf_hook
new_hook = _create_accelerate_new_hook(old_hook)
remove_hook_from_module(module)
add_hook_to_module(new_module, new_hook)
new_module.to(module.weight.device)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"For some reason the model has not been properly dequantized. You might see unexpected behavior."
)
return model
def validate_bnb_backend_availability(raise_exception=False):
"""
Validates if the available devices are supported by bitsandbytes, optionally raising an exception if not.
"""
bnb_supported_devices = getattr(bnb, "supported_torch_devices", set())
available_devices = set(get_available_devices())
if not available_devices.intersection(bnb_supported_devices):
if raise_exception:
err_msg = (
f"None of the available devices `available_devices = {available_devices or None}` are supported by the bitsandbytes version you have installed: `bnb_supported_devices = {bnb_supported_devices}`. "
"Please check the docs to see if the backend you intend to use is available and how to install it: https://huggingface.co/docs/bitsandbytes/main/en/installation"
)
raise RuntimeError(err_msg)
logger.warning("No supported devices found for bitsandbytes")
return False
return True
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/flex_attention.py | src/transformers/integrations/flex_attention.py | """
Partially inspired by torchtune's flex attention implementation
Citation:
@software{torchtune,
title = {torchtune: PyTorch's finetuning library},
author = {torchtune maintainers and contributors},
url = {https://github.com/pytorch/torchtune},
license = {BSD-3-Clause},
month = apr,
year = {2024}
}
"""
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import torch
from packaging import version
from ..utils import is_torch_flex_attn_available, logging
from ..utils.import_utils import get_torch_version, is_torch_less_or_equal, is_torchdynamo_compiling
if is_torch_flex_attn_available():
from torch.nn.attention.flex_attention import _DEFAULT_SPARSE_BLOCK_SIZE as flex_default_block_size
from torch.nn.attention.flex_attention import BlockMask, create_block_mask, flex_attention
logger = logging.get_logger(__name__)
class WrappedFlexAttention:
"""
Singleton class, so that flex attention is compiled only once, the first time it is called.
"""
_instance = None
_is_flex_compiled = False
_compiled_flex_attention = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
# Create a new instance if one doesn't already exist
cls._instance = super().__new__(cls)
return cls._instance
@torch.compiler.disable(recursive=False)
def __init__(self, training):
"""
Initialize or update the singleton instance.
"""
if not self._is_flex_compiled or training != self.training:
self.training = training
if is_torch_less_or_equal("2.5.1"):
self._compiled_flex_attention = torch.compile(flex_attention, dynamic=False)
# In PyTorch 2.6.0, there's a known issue with flex attention compilation which may
# cause errors. The suggested fix is to compile with "max-autotune-no-cudagraphs"
# see https://github.com/pytorch/pytorch/issues/146260 for training
elif version.parse(get_torch_version()).base_version == "2.6.0" and training:
self._compiled_flex_attention = torch.compile(
flex_attention, dynamic=False, mode="max-autotune-no-cudagraphs"
)
# Fallback, usually the most recent torch 2.7.x+ versions
else:
self._compiled_flex_attention = torch.compile(flex_attention)
self._is_flex_compiled = True
def __call__(self):
return self._compiled_flex_attention
def compile_friendly_flex_attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
training=False,
**kwargs,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
# Constructing the singleton (first call) compiles flex attention once; calling the instance (second call) returns the compiled function
# Do not use compiled version if already compiling forward (it raises issues)
flex_attention_compiled = WrappedFlexAttention(training)() if not is_torchdynamo_compiling() else flex_attention
return flex_attention_compiled(
query,
key,
value,
**kwargs,
)
Offset = Union[torch.Tensor, int]
# TODO: deprecate / rename to make_flex_block_mask for clarity as it's not only causal anymore
def make_flex_block_causal_mask(
attention_mask_2d: torch.Tensor,
attention_chunk_size: int | None = None,
query_length=None,
key_length=None,
offsets: tuple[Offset, Offset] | None = None,
is_causal: bool | None = True,
) -> "BlockMask":
"""
IMPORTANT NOTICE: This function is deprecated in favor of using the mask primitives in `masking_utils.py`,
and will be removed in a future version without warnings. New code should not use it. It is only kept here
for BC for now, while models using it are being patched accordingly.
Create a block (causal) document mask for a batch of sequences, both packed and unpacked.
The block (causal) mask logic is built here and passed to :func:`torch.nn.attention.flex_attention.create_block_mask`.
The resultant BlockMask is a compressed representation of the full (causal) block
mask. BlockMask is essential for performant computation of flex attention.
See: https://pytorch.org/blog/flexattention/
Args:
attention_mask_2d (torch.Tensor): Attention mask for packed and padded sequences
of shape (batch_size, total_seq_len). e.g.
For unpacked sequence:
[[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0]]
For packed sequence:
[[1, 1, 1, 2, 2, 2, 0],
[1, 1, 2, 2, 2, 3, 3]]
Returns:
BlockMask
"""
batch_size, total_seq_len = attention_mask_2d.shape
if not key_length:
key_length = total_seq_len
if not query_length:
query_length = total_seq_len
# older torch (2.5.x) cannot handle sequence lengths that are not multiples of 128 (the default block size)
pad_len = ((key_length // flex_default_block_size) + 1) * flex_default_block_size
attention_mask_2d = torch.nn.functional.pad(attention_mask_2d, value=0, pad=(0, pad_len - key_length))
device = attention_mask_2d.device
document_ids = attention_mask_2d.clone()
if attention_chunk_size is not None:
# we create an arange, then we just // by chunk size to get [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
chunk_idxs = (document_ids.clone().fill_(1).cumsum(-1) - 1) // (attention_chunk_size)
# Instead of passing a tensor mask, flex attention requires a mask_mod function
# that determines which elements of QK^T should be included in the attention
# computation prior to the softmax. For sample packing, we need the logic for
# both the causal mask and the document mask. See PyTorch's official
# blog post for more details: https://pytorch.org/blog/flexattention/#mask-mods
def causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
"""
Defines the logic of a block causal mask by combining both a standard causal mask
and a block diagonal document mask.
See :func:`~torchtune.modules.attention_utils.create_block_causal_mask`
for an illustration.
"""
causal_mask = q_idx >= kv_idx # not valid when decoding
document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
padding_mask = attention_mask_2d[batch_idx, q_idx] > 0
final_mask = causal_mask & padding_mask & document_mask
return final_mask
def chunk_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
"""
Combines the chunk mask with the causal mask for chunked attention.
"""
chunk_mask = chunk_idxs[batch_idx, q_idx] == chunk_idxs[batch_idx, kv_idx]
causal_doc_mask = causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx)
return chunk_mask & causal_doc_mask
def default_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
"""
Utilizes default attention mask to enable encoder and encoder-decoder
attention masks.
"""
document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
# kv indexing is crucial in order to work correctly
padding_mask = attention_mask_2d[batch_idx, kv_idx] > 0
final_mask = padding_mask & document_mask
return final_mask
if not is_causal:
mask_mod_maybe_combined = default_mask_mod
else:
mask_mod_maybe_combined = causal_mask_mod if attention_chunk_size is None else chunk_causal_mask_mod
if offsets is not None:
q_offset = offsets[0].to(device)
kv_offset = offsets[1].to(device)
def mask_mod(batch_idx, head_idx, q_idx, kv_idx):
offset_q = q_idx + q_offset
offset_kv = kv_idx + kv_offset
return mask_mod_maybe_combined(batch_idx, head_idx, offset_q, offset_kv)
else:
mask_mod = mask_mod_maybe_combined
return create_block_mask(
mask_mod=mask_mod,
B=batch_size,
H=None, # attention head
Q_LEN=query_length,
KV_LEN=key_length,
device=device,
# compiling the mask is not BC with older torch
_compile=not is_torch_less_or_equal("2.5.1"),
)
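# --- Illustrative sketch, not part of the library API ---
# Plain-torch illustration of how the causal, padding and document constraints combine for
# one packed row, mirroring `causal_mask_mod` above without flex attention or BlockMask.
# The packed attention mask below is made up.
def _example_packed_causal_mask():
    import torch

    attention_mask_2d = torch.tensor([[1, 1, 1, 2, 2, 2, 0]])  # two documents + one pad position
    document_ids = attention_mask_2d.clone()
    seq_len = attention_mask_2d.shape[1]
    q_idx = torch.arange(seq_len).view(-1, 1)
    kv_idx = torch.arange(seq_len).view(1, -1)
    causal_mask = q_idx >= kv_idx
    document_mask = document_ids[0, q_idx] == document_ids[0, kv_idx]
    padding_mask = attention_mask_2d[0, q_idx] > 0
    final_mask = causal_mask & padding_mask & document_mask
    # e.g. query 4 (second document) may only attend to keys 3 and 4
    return final_mask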
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def flex_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Union[torch.Tensor, "BlockMask"],
scaling: float | None = None,
softcap: float | None = None,
s_aux: torch.Tensor | None = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor | None]:
if kwargs.get("dropout", 0.0) > 0:
raise ValueError(
"`flex_attention` does not support `dropout`. Please use it with inference"
" only (`model.eval()`) or turn off the attention dropout in the respective config."
)
block_mask = None
score_mask = None
if isinstance(attention_mask, BlockMask):
block_mask = attention_mask
else:
score_mask = attention_mask
if score_mask is not None:
score_mask = score_mask[:, :, :, : key.shape[-2]]
def score_mod(score, batch_idx, head_idx, q_idx, kv_idx):
if softcap is not None:
score = softcap * torch.tanh(score / softcap)
if score_mask is not None:
score = score + score_mask[batch_idx][0][q_idx][kv_idx]
# Note: attention sinks cannot be correctly implemented in score_mod
# because it requires operating on the full attention matrix before softmax.
# ==> this is done after flex attention
return score
enable_gqa = True
num_local_query_heads = query.shape[1]
# When running TP, the per-rank number of query heads may not be a power of two; in that case, repeat the KV heads manually instead of relying on `enable_gqa`:
if (num_local_query_heads & (num_local_query_heads - 1)) != 0:
key = repeat_kv(key, query.shape[1] // key.shape[1])
value = repeat_kv(value, query.shape[1] // value.shape[1])
enable_gqa = False
kernel_options = kwargs.get("kernel_options")
# On CPU we must skip returning LSE due to a runtime issue; elsewhere, follow PyTorch API and return it
return_lse = query.device.type != "cpu"
if not return_lse and s_aux is not None:
raise ValueError(
"Attention sinks cannot be run on CPU with flex attention. Please switch to a different device, e.g. CUDA"
)
flex_attention_output = compile_friendly_flex_attention(
query,
key,
value,
score_mod=score_mod,
block_mask=block_mask,
enable_gqa=enable_gqa,
scale=scaling,
kernel_options=kernel_options,
# Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless.
# For simplification, we thus return it whenever the backend allows (see `return_lse` above), as no additional computations are introduced.
return_lse=return_lse,
training=module.training,
)
# lse is returned in float32
if return_lse:
attention_output, lse = flex_attention_output # type: ignore[misc]
lse = lse.to(value.dtype)
if s_aux is not None:
# Apply attention sinks by renormalizing using LSE
batch_size, num_heads, seq_len_q, _ = attention_output.shape # batch, num_heads, seq_len, head_dim
sinks = s_aux.view(1, -1, 1, 1).expand(batch_size, num_heads, seq_len_q, 1)
# We need to compute the normalization that includes the sinks
# since log(sum(exp(scores))) = lse, exp(log(sum(exp(scores)))) = exp(lse)
# NB: log(sum(exp(scores)) + exp(sink)) = log(exp(lse) + exp(sink))
lse_expanded = lse.unsqueeze(-1) # [batch, num_heads, seq_len, 1]
combined_lse = torch.logsumexp(torch.cat([lse_expanded, sinks], dim=-1), dim=-1, keepdim=True)
# Use new_norm / old_norm = exp(combined_lse - lse) to compute renorm and apply
renorm_factor = torch.exp(lse_expanded - combined_lse)
attention_output = attention_output * renorm_factor
else:
attention_output = flex_attention_output # type: ignore[assignment]
lse = None
attention_output = attention_output.transpose(1, 2).contiguous()
return attention_output, lse
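# --- Illustrative sketch, not part of the library API ---
# Numerical check of the sink renormalization above for a single (head, query) position:
# scaling the softmax over the real keys by exp(lse - logsumexp([lse, s_aux])) is the same
# as appending a virtual sink logit before the softmax. The scores below are made up.
def _example_sink_renormalization():
    import torch

    scores = torch.tensor([2.0, 0.5, -1.0])  # attention logits over the real keys
    sink = torch.tensor(1.0)                 # auxiliary sink logit (s_aux)
    probs = torch.softmax(scores, dim=-1)
    lse = torch.logsumexp(scores, dim=-1)
    combined_lse = torch.logsumexp(torch.stack([lse, sink]), dim=-1)
    renormalized = probs * torch.exp(lse - combined_lse)
    reference = torch.softmax(torch.cat([scores, sink.view(1)]), dim=-1)[:-1]
    assert torch.allclose(renormalized, reference)
    return renormalized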
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/hqq.py | src/transformers/integrations/hqq.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"HQQ (Half-Quadratic Quantization) integration file"
from ..utils import is_hqq_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
# Name all modules inside the model
def autoname_modules(model):
for name, module in model.named_modules():
module.name = name
# Get the linear_tag from a module name. For example: model.layers.31.self_attn.k_proj -> self_attn.k_proj
def name_to_linear_tag(name):
return ".".join([n for n in name.split(".") if ((n not in ["model", "layers"]) and (not n.isnumeric()))])
# Get all linear tags available
def get_linear_tags(model):
if is_hqq_available():
from hqq.core.quantize import HQQLinear
linear_tags = set()
for name, module in model.named_modules():
if isinstance(module, (torch.nn.Linear, HQQLinear)):
linear_tags.add(name_to_linear_tag(name))
return list(linear_tags)
def _prepare_for_hqq_linear(model, patch_params, has_been_replaced, current_key_name=None):
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if isinstance(module, torch.nn.Linear):
# Get linear tag
linear_tag = name_to_linear_tag(module.name)
# We put the module quant_config into the nn.Linear layer so we can access it later in quantizer_hqq.create_quantized_param()
if linear_tag in patch_params:
if patch_params[linear_tag] is not None:
model._modules[name].quant_config = patch_params[linear_tag]
# Store the module class in case we need to transpose the weight later
model._modules[name].source_cls = type(module)
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(False)
has_been_replaced = True
# Add these fake parameters to avoid loading failures
for att in ["W_q", "meta"]:
setattr(module, att, None)
if len(list(module.children())) > 0:
_, has_been_replaced = _prepare_for_hqq_linear(
module,
patch_params=patch_params,
has_been_replaced=has_been_replaced,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def prepare_for_hqq_linear(model, quantization_config=None, modules_to_not_convert=None, has_been_replaced=False):
"""
Prepares nn.Linear layers for HQQ quantization.
Since each layer type can have separate quantization parameters, we need to do the following:
1- Tag each module with its name via autoname_modules()
2- Extract linear_tags (e.g. ['self_attn.q_proj', ...])
3- Map quantization parameters as a dictionary linear_tag -> quant_params in the format HQQLinear expects; this is referred to as patch_params
"""
modules_to_not_convert = [] if modules_to_not_convert is None else modules_to_not_convert
# Add name to module
autoname_modules(model)
# Get linear tags. This allows us to use different quant params for different layer types
linear_tags = get_linear_tags(model)
# Convert quantization_config to layer-wise config
skip_modules = quantization_config.skip_modules
quant_config = quantization_config.quant_config
linear_tags = list(set(linear_tags) - set(skip_modules) - set(modules_to_not_convert))
if any(key in linear_tags for key in quant_config):
# Layers whose tag is not specified in quant_config are mapped to None and left unquantized
patch_params = dict.fromkeys(linear_tags)
patch_params.update(quant_config)
else:
# Same quant_config for all layers
patch_params = dict.fromkeys(linear_tags, quant_config)
model, has_been_replaced = _prepare_for_hqq_linear(
model, patch_params=patch_params, has_been_replaced=has_been_replaced
)
# We store quantization config as linear_tag -> hqq quant config
model.config.quantization_config = {
"quant_config": quant_config,
"quant_method": quantization_config.quant_method,
"skip_modules": skip_modules,
}
if not has_been_replaced:
logger.warning("No linear modules were found in your model for quantization.")
return model
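# --- Illustrative sketch, not part of the library API ---
# How the layer-wise `patch_params` mapping described in `prepare_for_hqq_linear` is built.
# The tags and the placeholder config dicts below are made up; real per-layer configs come
# from the HQQ quantization config's `quant_config`.
def _example_hqq_patch_params():
    linear_tags = ["self_attn.q_proj", "self_attn.k_proj", "mlp.down_proj"]
    quant_config = {"self_attn.q_proj": {"nbits": 4}}  # hypothetical per-tag override
    if any(key in linear_tags for key in quant_config):
        # Tags without an explicit entry map to None and are left unquantized.
        patch_params = dict.fromkeys(linear_tags)
        patch_params.update(quant_config)
    else:
        # Otherwise, the same config is shared by every linear tag.
        patch_params = dict.fromkeys(linear_tags, quant_config)
    return patch_params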
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/bitnet.py | src/transformers/integrations/bitnet.py | from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.get_logger(__name__)
# the weights are ternary so can be represented with 2 bits, and they are packed in uint8 tensors, hence the number of values per item is 4
VALUES_PER_ITEM = 4
def pack_weights(quantized_weights: torch.Tensor) -> torch.Tensor:
"""
Packs a tensor of quantized weights into a compact format using 2 bits per value.
Parameters:
-----------
quantized_weights : torch.Tensor
A tensor containing ternary quantized weights with values in {-1, 0, 1}. These values are adjusted to
{0, 1, 2} before being packed.
Returns:
--------
torch.Tensor
A packed tensor where each element stores 4 quantized values (each using 2 bits) in an 8-bit format.
"""
original_shape = quantized_weights.shape
row_dim = (original_shape[0] + VALUES_PER_ITEM - 1) // VALUES_PER_ITEM
if len(original_shape) == 1:
packed_tensor_shape = (row_dim,)
else:
packed_tensor_shape = (row_dim, *original_shape[1:])
quantized_weights += 1
packed = torch.zeros(packed_tensor_shape, device=quantized_weights.device, dtype=torch.uint8)
unpacked = quantized_weights.to(torch.uint8)
it = min(VALUES_PER_ITEM, (original_shape[0] // row_dim) + 1)
for i in range(it):
start = i * row_dim
end = min(start + row_dim, original_shape[0])
packed[: (end - start)] |= unpacked[start:end] << 2 * i
return packed
@torch.compile
def unpack_weights(packed: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
"""
Unpacks a tensor of quantized weights that were stored in a packed format using 2 bits per value.
Parameters:
-----------
packed : torch.Tensor
A tensor containing packed weights where each element represents 4 quantized values (using 2 bits per value).
dtype : torch.dtype
The dtype of the returned Tensor
Returns:
--------
torch.Tensor
A tensor of unpacked weights, where each value is converted from its packed 2-bit representation.
Example:
--------
packed = torch.tensor([[0b10100001, 0b00011000],
[0b10010000, 0b00001010]], dtype=torch.uint8)
# Unpack the values
unpacked = unpack_weights(packed)
# Resulting unpacked tensor
print(unpacked)
# Output: tensor([[ 0, -1],
[-1, 1],
[-1, 1],
[-1, 1],
[ 1, 0],
[ 0, -1],
[ 1, -1],
[ 1, -1]])
Explanation of the example:
---------------------------
Let's take the first packed value, 0b10100001 (packed[0][0]). We only need to look at the first column,
because every element is unpacked across the first dimension:
- First 2 bits: `01` → 0 at [0][0]
- Second 2 bits: `00` → -1 at [2][0]
- Third 2 bits: `10` → 1 at [4][0]
- Fourth 2 bits: `10` → 1 at [6][0]
The first value of the second packed row (0b10010000, i.e. packed[1][0]) gives the values at [1][0], [3][0], [5][0], [7][0].
We subtract 1 because during the packing process, it's easier to work with values like 0, 1, and 2. To make this possible,
we add 1 to the original ternary weights (which are typically -1, 0, and 1) when packing them. When unpacking, we reverse
this by subtracting 1 to restore the original ternary values.
"""
packed_shape = packed.shape
if len(packed_shape) == 1:
original_row_dim = packed_shape[0] * VALUES_PER_ITEM
unpacked_shape = (original_row_dim,)
else:
original_row_dim = packed_shape[0] * VALUES_PER_ITEM
unpacked_shape = (original_row_dim, *packed_shape[1:])
unpacked = torch.zeros(unpacked_shape, device=packed.device, dtype=torch.uint8)
for i in range(VALUES_PER_ITEM):
start = i * packed_shape[0]
end = start + packed_shape[0]
mask = 3 << (2 * i)
unpacked[start:end] = (packed & mask) >> (2 * i)
return unpacked.to(dtype) - 1
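# --- Illustrative sketch, not part of the library API ---
# Round-trip check of `pack_weights` / `unpack_weights` above on a small ternary tensor.
# Note that `pack_weights` modifies its argument in place (hence the clone) and that
# `unpack_weights` is wrapped in `torch.compile`, so the first call triggers compilation.
def _example_pack_unpack_roundtrip():
    import torch

    ternary = torch.tensor(
        [[-1, 0], [1, 1], [0, -1], [1, 0], [-1, -1], [0, 1], [1, -1], [0, 0]]
    )
    packed = pack_weights(ternary.clone())  # uint8, 4 values per byte along dim 0
    unpacked = unpack_weights(packed, dtype=torch.int8)
    assert packed.shape == (2, 2) and unpacked.shape == (8, 2)
    assert torch.equal(unpacked, ternary.to(torch.int8))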
class BitLinear(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool,
device=None,
dtype=None,
use_rms_norm: bool = False,
rms_norm_eps: float = 1e-6,
):
super().__init__()
self.dtype = dtype
self.in_features = in_features
self.out_features = out_features
self.register_buffer(
"weight",
torch.zeros(
(out_features // VALUES_PER_ITEM, in_features),
dtype=torch.uint8,
device=device,
),
)
self.register_buffer(
"weight_scale",
torch.ones(
(1),
dtype=dtype,
device=device,
),
)
if bias:
self.register_buffer("bias", torch.zeros((out_features), dtype=dtype, device=device))
else:
self.bias = None
# Optional RMSNorm (applied on the activations before quantization).
self.rms_norm = None
if use_rms_norm:
from ..models.llama.modeling_llama import LlamaRMSNorm
self.rms_norm = LlamaRMSNorm(in_features, eps=rms_norm_eps)
@torch.compile
def activation_quant(self, input, num_bits=8):
"""
Activation function: performs symmetric, per-token quantization on the input activations.
Parameters:
-----------
input : torch.Tensor
Input activations to be quantized.
num_bits : int, optional (default=8)
Number of bits to use for quantization, determining the quantization range.
Returns:
--------
result : torch.Tensor
Quantized activation tensor, with values mapped to an `int8` range.
scale : torch.Tensor
The per-token scaling factors used to quantize the tensor.
"""
Qn = -(2 ** (num_bits - 1))
Qp = 2 ** (num_bits - 1) - 1
scale = Qp / input.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)
result = (input * scale).round().clamp(Qn, Qp)
return result.to(torch.int8), scale
@torch.compile
def post_quant_process(self, input, input_scale, weight_scale):
out = input / (input_scale * weight_scale)
return out
def forward(self, input):
# Apply RMSNorm on the input if requested.
if self.rms_norm is not None:
input = self.rms_norm(input)
w = self.weight
w_quant = unpack_weights(w, dtype=self.dtype)
input_quant, input_scale = self.activation_quant(input)
y = F.linear(input_quant.to(self.dtype), w_quant)
y = self.post_quant_process(y, self.weight_scale, input_scale)
if self.bias is not None:
y += self.bias.view(1, -1).expand_as(y)
return y
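# --- Illustrative sketch, not part of the library API ---
# Plain-torch reproduction of the per-token 8-bit activation quantization performed in
# `BitLinear.activation_quant`. The input values are made up.
def _example_activation_quant():
    import torch

    x = torch.tensor([[0.1, -0.4, 0.25]])
    scale = 127 / x.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5)  # 127 / 0.4
    quantized = (x * scale).round().clamp(-128, 127).to(torch.int8)
    # quantized == [[32, -127, 79]]; dividing by `scale` recovers the input up to rounding error
    return quantized, quantized.float() / scale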
class WeightQuant(torch.autograd.Function):
"""
Implements a custom autograd function for weight quantization.
This performs ternary quantization (-1, 0, 1) based on scaling by the
mean absolute value of the weights. It uses the Straight-Through Estimator
(STE) for the backward pass.
"""
@staticmethod
@torch.compile
def forward(ctx, weight):
dtype = weight.dtype
weight = weight.float()
scale = 1.0 / weight.abs().mean().clamp_(min=1e-5)
weight = (weight * scale).round().clamp(-1, 1) / scale
return weight.to(dtype)
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
class ActQuant(torch.autograd.Function):
"""
Implements a custom autograd function for activation quantization.
This performs symmetric 8-bit quantization (to the range [-128, 127])
based on the maximum absolute value along the last dimension (per-token/row scaling).
It uses the Straight-Through Estimator (STE) for the backward pass.
"""
@staticmethod
@torch.compile
def forward(ctx, activation):
dtype = activation.dtype
activation = activation.float()
scale = 127 / activation.abs().max(dim=-1, keepdim=True).values.clamp_(min=1e-5)
activation = (activation * scale).round().clamp(-128, 127) / scale
return activation.to(dtype)
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
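# --- Illustrative sketch, not part of the library API ---
# Plain-torch reproduction of the ternary weight quantization in `WeightQuant.forward`:
# weights are scaled by the inverse mean absolute value, rounded and clamped to {-1, 0, 1},
# then rescaled. The weight values are made up.
def _example_weight_quant():
    import torch

    w = torch.tensor([0.8, -0.05, 0.3, -0.6])
    scale = 1.0 / w.abs().mean().clamp(min=1e-5)  # mean(|w|) = 0.4375, scale ~= 2.2857
    w_q = (w * scale).round().clamp(-1, 1) / scale
    # (w * scale).round() == [2, -0, 1, -1], clamped to [1, 0, 1, -1],
    # so w_q ~= [0.4375, 0.0, 0.4375, -0.4375]
    return w_q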
class AutoBitLinear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None,
online_quant: bool = False,
use_rms_norm: bool = False,
rms_norm_eps: float = 1e-6,
):
super().__init__(in_features, out_features, bias)
self.online_quant = online_quant
# Optional RMSNorm
self.rms_norm = None
if use_rms_norm:
from ..models.llama.modeling_llama import LlamaRMSNorm
self.rms_norm = LlamaRMSNorm(in_features, eps=rms_norm_eps)
if not online_quant:
self.register_buffer(
"weight_scale",
torch.ones(
(1),
dtype=dtype,
device=device,
),
)
self._register_load_state_dict_pre_hook(self.load_hook)
def load_hook(
self,
state_dict,
prefix,
*args,
**kwargs,
):
if (prefix + "weight") in state_dict and state_dict[prefix + "weight"].dtype != self.weight.dtype:
state_dict[prefix + "weight"] = unpack_weights(state_dict[prefix + "weight"], dtype=self.weight.dtype)
return state_dict
def forward(self, input):
# Optional RMSNorm on activations prior to quantization.
if self.rms_norm is not None:
input = self.rms_norm(input)
if self.online_quant:
weight = WeightQuant.apply(self.weight)
else:
weight = self.weight
input = ActQuant.apply(input)
output = F.linear(input, weight, self.bias)
if not self.online_quant:
output = output * self.weight_scale
return output
def replace_with_bitnet_linear(model, modules_to_not_convert: list[str] | None = None, quantization_config=None):
"""
Public method that replaces the linear layers of the given model with bitnet quantized layers.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
quantization_config (`BitNetConfig`):
The quantization config object that contains the quantization parameters.
"""
has_been_replaced = False
# we need this to correctly materialize the weights during quantization
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
with torch.device("meta"):
if isinstance(module, nn.Linear):
if quantization_config and quantization_config.linear_class == "autobitlinear":
new_module = AutoBitLinear(
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
device=module.weight.device,
dtype=module.weight.dtype,
online_quant=(quantization_config.quantization_mode == "online"),
use_rms_norm=quantization_config.use_rms_norm,
rms_norm_eps=quantization_config.rms_norm_eps,
)
if quantization_config.quantization_mode == "offline":
new_module.requires_grad_(False)
else:
new_module = BitLinear(
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
device=module.weight.device,
dtype=module.weight.dtype,
use_rms_norm=quantization_config.use_rms_norm if quantization_config else False,
rms_norm_eps=quantization_config.rms_norm_eps if quantization_config else 1e-6,
)
new_module.requires_grad_(False)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using bitnet but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/spqr.py | src/transformers/integrations/spqr.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"SpQR (Sparse-Quantized Representation) integration file"
from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_spqr_available, is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
logger = logging.get_logger(__name__)
def replace_with_spqr_linear(model, modules_to_not_convert: list[str] | None = None, quantization_config=None):
"""
Public method that replaces the Linear layers of the given model with SPQR quantized layers.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
quantization_config (`SpQRConfig`):
The quantization config object that contains the quantization parameters.
"""
if is_spqr_available():
from spqr_quant import QuantizedLinear
has_been_replaced = False
# we need this to correctly materialize the weights during quantization
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
with torch.device("meta"):
if isinstance(module, nn.Linear):
shapes = quantization_config.shapes
new_module = QuantizedLinear.create_placehodler(
rows=module.out_features,
cols=module.in_features,
bits=quantization_config.bits,
beta1=quantization_config.beta1,
beta2=quantization_config.beta2,
dense_weights_shape=shapes[f"{module_name}.dense_weights.shape"],
row_offsets_shape=shapes[f"{module_name}.row_offsets.shape"],
col_vals_shape=shapes[f"{module_name}.col_vals.shape"],
in_perm_shape=shapes[f"{module_name}.in_perm.shape"],
)
# Force requires grad to False to avoid unexpected errors
new_module.requires_grad_(False)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using eetq but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/fsdp.py | src/transformers/integrations/fsdp.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
from typing import TYPE_CHECKING
from ..utils import is_torch_available, strtobool
if TYPE_CHECKING:
from torch import nn
def is_fsdp_managed_module(module: nn.Module) -> bool:
if not is_torch_available():
return False
import torch
if not torch.distributed.is_available():
return False
import torch.distributed.fsdp
return isinstance(module, torch.distributed.fsdp.FullyShardedDataParallel) or getattr(
module, "_is_fsdp_managed_module", False
)
def is_fsdp_enabled():
if is_torch_available():
import torch
return (
torch.distributed.is_available()
and torch.distributed.is_initialized()
and strtobool(os.environ.get("ACCELERATE_USE_FSDP", "False")) == 1
and strtobool(os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING", "False")) == 1
)
return False
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/tpu.py | src/transformers/integrations/tpu.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch.utils.data import DataLoader
from ..utils import is_torch_xla_available
def tpu_spmd_dataloader(dataloader: DataLoader):
if is_torch_xla_available():
import torch_xla.distributed.parallel_loader as pl
assert isinstance(dataloader, pl.MpDeviceLoader), (
"The dataloader must be a `torch_xla.distributed.parallel_loader.MpDeviceLoader`."
)
# This is to support PyTorch/XLA FSDP via SPMD.
# Here we shard the input data's 0th dim across the fsdp axis.
import torch_xla.distributed.spmd as xs
sharding_spec = xs.ShardingSpec(xs.get_global_mesh(), ("fsdp", None))
dataloader._parallel_loader_kwargs["input_sharding"] = sharding_spec
return dataloader
else:
return dataloader
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/sdpa_paged.py | src/transformers/integrations/sdpa_paged.py | import torch
from ..generation.continuous_batching.cache import PagedAttentionCache
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
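# --- Illustrative sketch, not part of the library API ---
# Shape check for `repeat_kv` above: 2 KV heads repeated 4 times act as 8 attention heads,
# and every repeated slice is a copy of its source KV head. The sizes are made up.
def _example_repeat_kv_shapes():
    import torch

    key = torch.randn(1, 2, 5, 16)      # (batch, num_kv_heads, seq_len, head_dim)
    repeated = repeat_kv(key, n_rep=4)  # (batch, num_kv_heads * n_rep, seq_len, head_dim)
    assert repeated.shape == (1, 8, 5, 16)
    assert torch.equal(repeated[0, 3], key[0, 0]) and torch.equal(repeated[0, 4], key[0, 1])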
def sdpa_attention_paged_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: torch.Tensor | None,
dropout: float = 0.0,
scaling: float | None = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
# Add KV cache to the key and value tensors
cache: PagedAttentionCache | None = kwargs.pop("cache", None)
if cache is not None:
# This changes the shape of k and v from [1, num_kv_heads, seqlen_kv, head_dim] to [-1, num_kv_heads, head_dim]
key, value = cache.update(
key_states=key,
value_states=value,
layer_idx=module.layer_idx,
read_index=kwargs["read_index"],
write_index=kwargs["write_index"],
)
key = key.transpose(0, 1).unsqueeze(0)
value = value.transpose(0, 1).unsqueeze(0)
# Repeat the key and value tensors for each group of key-value heads
if hasattr(module, "num_key_value_groups"):
key = repeat_kv(key, module.num_key_value_groups)
value = repeat_kv(value, module.num_key_value_groups)
# Get the right causal mask for the current layer
causal_mask = attention_mask
# Run the actual attention
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
attn_output = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=causal_mask,
dropout_p=dropout,
scale=scaling,
# Packed sequence format is used for input, so that it can never be causal.
is_causal=False,
)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, None
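# --- Illustrative sketch, not part of the library API ---
# Shape-only sketch of the reshaping applied after the paged cache update above: the cache
# returns tensors flattened over cached tokens, while SDPA expects a
# (batch, heads, tokens, head_dim) layout. The sizes are made up and no real
# `PagedAttentionCache` is involved.
def _example_paged_kv_reshape():
    import torch

    cached_key = torch.randn(10, 4, 64)            # (total_cached_tokens, num_kv_heads, head_dim)
    key = cached_key.transpose(0, 1).unsqueeze(0)   # (1, num_kv_heads, total_cached_tokens, head_dim)
    assert key.shape == (1, 4, 10, 64)
    return key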
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/accelerate.py | src/transformers/integrations/accelerate.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some of the functions here are derived from the `accelerate` library, with some tweaks for better performances
and simplicity/ease of use.
"""
import copy
import inspect
import os
import re
from collections import OrderedDict, defaultdict
from typing import TYPE_CHECKING
from safetensors import safe_open
from safetensors.torch import save_file
from ..utils import (
is_accelerate_available,
is_torch_available,
is_torch_xpu_available,
logging,
)
from ..utils.quantization_config import QuantizationMethod
from .deepspeed import is_deepspeed_zero3_enabled
from .fsdp import is_fsdp_enabled
if is_torch_available():
import torch
import torch.nn as nn
if is_accelerate_available():
from accelerate import dispatch_model
from accelerate.utils import get_max_memory
from accelerate.utils.modeling import clean_device_map, get_max_layer_size, get_module_size_with_ties
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..quantizers import HfQuantizer
logger = logging.get_logger(__name__)
def check_and_set_device_map(device_map: "torch.device | int | str | dict | None") -> dict | str | None:
from ..modeling_utils import get_torch_context_manager_or_global_device
# Potentially detect context manager or global device, and use it (only if no device_map was provided)
if device_map is None and not is_deepspeed_zero3_enabled():
device_in_context = get_torch_context_manager_or_global_device()
if device_in_context == torch.device("meta"):
raise RuntimeError(
"You are using `from_pretrained` with a meta device context manager or `torch.set_default_device('meta')`.\n"
"This is an anti-pattern as `from_pretrained` wants to load existing weights.\nIf you want to initialize an "
"empty model on the meta device, use the context manager or global device with `from_config`, or `ModelClass(config)`"
)
device_map = device_in_context
# change device_map into a map if we passed an int, a str or a torch.device
if isinstance(device_map, torch.device):
device_map = {"": device_map}
elif isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
try:
if device_map == "cuda":
# setting to the local rank
local_rank = int(os.environ.get("LOCAL_RANK", 0))
device_map = f"cuda:{local_rank}"
device_map = {"": torch.device(device_map)}
except RuntimeError:
raise ValueError(
"When passing device_map as a string, the value needs to be a device name (e.g. cpu, cuda:0) or "
f"'auto', 'balanced', 'balanced_low_0', 'sequential' but found {device_map}."
)
elif isinstance(device_map, int):
if device_map < 0:
raise ValueError(
"You can't pass device_map as a negative int. If you want to put the model on the cpu, pass device_map = 'cpu' "
)
else:
device_map = {"": device_map}
if device_map is not None:
if is_deepspeed_zero3_enabled():
raise ValueError("DeepSpeed Zero-3 is not compatible with passing a `device_map`.")
if not is_accelerate_available():
raise ValueError(
"Using a `device_map`, `tp_plan`, `torch.device` context manager or setting `torch.set_default_device(device)` "
"requires `accelerate`. You can install it with `pip install accelerate`"
)
return device_map
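# --- Illustrative sketch, not part of the library API ---
# Miniature restatement of the normalization rules in `check_and_set_device_map` above
# (without the accelerate / DeepSpeed checks), just to make the accepted scalar forms
# explicit. The inputs shown in the trailing comments are made up.
def _example_normalize_device_map(device_map):
    import os

    import torch

    if isinstance(device_map, torch.device):
        return {"": device_map}
    if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
        if device_map == "cuda":
            device_map = f"cuda:{int(os.environ.get('LOCAL_RANK', 0))}"
        return {"": torch.device(device_map)}
    if isinstance(device_map, int):
        return {"": device_map}
    return device_map  # None and the "auto"-style strategies pass through unchanged

# _example_normalize_device_map(0)      -> {"": 0}
# _example_normalize_device_map("cuda") -> {"": device(type="cuda", index=LOCAL_RANK)}
# _example_normalize_device_map("auto") -> "auto"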
def compute_module_sizes(
model: "PreTrainedModel",
hf_quantizer: "HfQuantizer | None" = None,
buffers_only: bool = False,
only_modules: bool = True,
) -> tuple[dict[str, int], dict[str, int]]:
"""
Compute the size of each submodule of a given model (in bytes).
Returns a tuple of 2 dicts, the first one containing a mapping of all the modules to their corresponding size
in bytes, and the second one containing a mapping from all leaf modules (modules containing parameters, the end of
the model graph) to their corresponding sizes.
If `only_modules` is set to False, the first mapping will not only contain the size of all modules, but also
the size of all parameters and buffers.
"""
all_module_sizes = defaultdict(int)
leaves_module_sizes = defaultdict(int)
if buffers_only:
iterator = model.named_buffers()
else:
# We need parameters + buffers here, as state_dict does not count non-persistent buffers which are taking space
def all_tensors():
yield from model.named_parameters()
yield from model.named_buffers()
iterator = all_tensors()
tied_keys = getattr(model, "all_tied_weights_keys", {}).keys()
for name, param in iterator:
# Do not count tied keys (the model is usually not tied yet here, so they will appear in the iterator)
# If the model is already tied, then they simply do not appear in the iterator anyway (remove_duplicates=True by default)
if name in tied_keys:
continue
if hf_quantizer is not None:
dtype_size = hf_quantizer.param_element_size(model, name, param)
else:
dtype_size = param.element_size()
size = param.numel() * dtype_size
name_parts = name.split(".")
for idx in range(len(name_parts)):
all_module_sizes[".".join(name_parts[:idx])] += size
if "." in name:
leaves_module_sizes[name.rsplit(".", 1)[0]] += size
# If we want to also have the full leaves in `all_module_sizes`
if not only_modules:
all_module_sizes[name] += size
return all_module_sizes, leaves_module_sizes
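# --- Illustrative sketch, not part of the library API ---
# `compute_module_sizes` only relies on `named_parameters` / `named_buffers`, so a plain
# torch module is enough to see the accounting: each parameter contributes
# numel() * element_size() bytes to itself and to every ancestor module. The toy model is made up.
def _example_compute_module_sizes():
    import torch.nn as nn

    toy = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
    all_sizes, leaf_sizes = compute_module_sizes(toy)
    # fp32: layer "0" holds (4 * 8 + 8) * 4 = 160 bytes, layer "1" holds (8 * 2 + 2) * 4 = 72 bytes
    assert leaf_sizes["0"] == 160 and leaf_sizes["1"] == 72
    assert all_sizes[""] == 160 + 72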
def compute_module_total_buffer_size(model: nn.Module, hf_quantizer: "HfQuantizer | None" = None):
"""
Compute the total size of buffers in each submodule of a given model.
"""
module_sizes, _ = compute_module_sizes(model, hf_quantizer, buffers_only=True)
return module_sizes.get("", 0)
def get_balanced_memory(
model: "PreTrainedModel",
max_memory: dict[int | str, int | str] | None = None,
no_split_module_classes: list[str] | None = None,
hf_quantizer: "HfQuantizer | None" = None,
low_zero: bool = False,
):
"""
Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU.
<Tip>
All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
meta device (as it would if initialized within the `init_empty_weights` context manager).
</Tip>
Args:
model (`PreTrainedModel`):
The model to analyze.
max_memory (`Dict`, *optional*):
A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset.
Example: `max_memory={0: "1GB"}`.
no_split_module_classes (`List[str]`, *optional*):
A list of layer class names that should never be split across device (for instance any layer that has a
residual connection).
hf_quantizer (`HfQuantizer`, *optional*):
A quantizer for the model.
low_zero (`bool`, *optional*):
Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the
Transformers generate function).
"""
# Get default / clean up max_memory
user_not_set_max_memory = max_memory is None
max_memory = get_max_memory(max_memory)
# Check the number of accelerators available
accelerator_max_memory = copy.deepcopy(max_memory)
_, _ = accelerator_max_memory.pop("cpu", None), accelerator_max_memory.pop("disk", None)
num_devices = len([d for d in accelerator_max_memory if accelerator_max_memory[d] > 0])
if num_devices == 0:
return max_memory
if num_devices == 1:
# We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer
low_zero = False
# If user just asked us to handle memory usage, we should avoid OOM
if user_not_set_max_memory:
for key in max_memory.keys():
if isinstance(key, int):
max_memory[key] *= 0.9 # 90% is a good compromise
logger.info(
f"We will use 90% of the memory on device {key} for storing the model, and 10% for the buffer to avoid OOM. "
"You can set `max_memory` in to a higher value to use more memory (at your own risk)."
)
break # only one device
module_sizes, leave_modules_sizes = compute_module_sizes(model, hf_quantizer)
per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices)
# We can't just set the memory to model_size // num_devices as it would end up being too small: each GPU would get
# slightly fewer layers and some layers would end up offloaded at the end. So this function computes a buffer size to
# add which is the biggest of:
# - the size of no split block (if applicable)
# - the mean of the layer sizes
if no_split_module_classes is None:
no_split_module_classes = []
elif not isinstance(no_split_module_classes, (list, tuple)):
no_split_module_classes = [no_split_module_classes]
# Identify the size of the no_split_block modules
buffer = 0
if len(no_split_module_classes) > 0:
no_split_children = {}
for name, size in module_sizes.items():
if name == "":
continue
submodule = model.get_submodule(name)
class_name = submodule.__class__.__name__
if class_name in no_split_module_classes and class_name not in no_split_children:
no_split_children[class_name] = size
if set(no_split_children.keys()) == set(no_split_module_classes):
break
buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0
mean_leaves = int(sum(leave_modules_sizes.values()) / max(len(leave_modules_sizes), 1))
buffer = int(1.25 * max(buffer, mean_leaves))
per_gpu += buffer
# Sorted list of GPU ids (some gpu ids may not be included in our max_memory dict - we simply ignore them)
gpus_idx_list = sorted(
device_id for device_id, device_mem in max_memory.items() if isinstance(device_id, int) and device_mem > 0
)
# The last device is left with max_memory just in case the buffer is not enough.
for idx in gpus_idx_list[:-1]:
max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx])
if low_zero:
min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)]))
max_memory[0] = min(min_zero, max_memory[0])
return max_memory
def _get_device_map(
model: "PreTrainedModel",
device_map: dict | str | None,
max_memory: dict | None,
hf_quantizer: "HfQuantizer | None",
) -> dict:
"""Compute the final `device_map` to use if we passed a value in ['auto', 'balanced', 'balanced_low_0', 'sequential'].
Otherwise, we check for any device inconsistencies in the device_map.
"""
if isinstance(device_map, str):
no_split_modules = model._get_no_split_modules(device_map)
if device_map != "sequential":
inferred_max_memory = get_balanced_memory(
model,
max_memory=max_memory,
no_split_module_classes=no_split_modules,
hf_quantizer=hf_quantizer,
low_zero=(device_map == "balanced_low_0"),
)
else:
inferred_max_memory = get_max_memory(max_memory)
# If the user does not provide `max_memory`, accelerate sets the WHOLE cpu available memory as available.
# This is unwanted, as we don't want to set an extremely tight bound (and pressure) on cpu memory if we are memory-constrained,
# especially if the model uses WeightConverter (because there will be some uncontrollable cpu memory spikes during
# the conversions before we resave the weights). In those cases, it's better to offload a bit more to disk
# when we are in-between, as otherwise we blow up cpu memory
if max_memory is None:
inferred_max_memory["cpu"] *= 0.90
if hf_quantizer is not None:
inferred_max_memory = hf_quantizer.adjust_max_memory(inferred_max_memory)
# `inferred_max_memory` contains non-reserved memory. There may be *unused* reserved memory in the GPU,
# which we can use to allocate parameters.
for device_name in inferred_max_memory:
if isinstance(device_name, int): # it's a GPU device
if is_torch_xpu_available():
unused_memory = torch.xpu.memory_reserved(device_name) - torch.xpu.memory_allocated(device_name)
else:
unused_memory = torch.cuda.memory_reserved(device_name) - torch.cuda.memory_allocated(device_name)
inferred_max_memory[device_name] += unused_memory
# respect the `max_memory` passed by the user
if max_memory is not None and device_name in max_memory:
inferred_max_memory[device_name] = min(inferred_max_memory[device_name], max_memory[device_name])
device_map = infer_auto_device_map(
model,
max_memory=inferred_max_memory,
no_split_module_classes=no_split_modules,
hf_quantizer=hf_quantizer,
)
if hf_quantizer is not None:
hf_quantizer.validate_environment(device_map=device_map)
return device_map
def accelerate_dispatch(model, hf_quantizer, device_map, offload_folder, offload_index, offload_buffers):
device_map_kwargs = {
"device_map": device_map,
"offload_dir": offload_folder,
"offload_index": offload_index,
"offload_buffers": offload_buffers,
}
if "skip_keys" in inspect.signature(dispatch_model).parameters:
device_map_kwargs["skip_keys"] = model._skip_keys_device_placement
# For HQQ method we force-set the hooks for single GPU envs
if (
"force_hooks" in inspect.signature(dispatch_model).parameters
and hf_quantizer is not None
and hf_quantizer.quantization_config.quant_method == QuantizationMethod.HQQ
):
device_map_kwargs["force_hooks"] = True
if (
hf_quantizer is not None
and hf_quantizer.quantization_config.quant_method == QuantizationMethod.FBGEMM_FP8
and isinstance(device_map, dict)
and ("cpu" in device_map.values() or "disk" in device_map.values())
):
device_map_kwargs["offload_buffers"] = True
if not is_fsdp_enabled() and not is_deepspeed_zero3_enabled():
dispatch_model(model, **device_map_kwargs)
def expand_device_map(device_map: dict | None, param_names: list[str]):
"""
Expand a device map to return the correspondence parameter name to device.
"""
if device_map is None:
return dict.fromkeys(param_names, "cpu")
# Here, we first sort by number of submodules, then length of the full string, to make sure to match correctly
device_map_regex = re.compile(
"|".join(rf"({k})" for k in sorted(device_map.keys(), key=lambda x: (x.count("."), len(x)), reverse=True))
)
new_device_map = {}
for param in param_names:
device_match = device_map_regex.match(param)
new_device_map[param] = device_map[device_match.group()] if device_match else device_map.get("", "cpu")
return new_device_map
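# Illustrative usage (hypothetical device map; a minimal sketch for exposition): the longest matching module
# prefix wins, and unmatched names fall back to the "" (root) entry:
#   expand_device_map({"model.layers.0": 0, "": "cpu"}, ["model.layers.0.mlp.weight", "lm_head.weight"])
#   # -> {"model.layers.0.mlp.weight": 0, "lm_head.weight": "cpu"}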
def get_device(device_map: dict | None, param_name: str, valid_torch_device: bool = False) -> torch.device | str | int:
"""Return the device on which `param_name` should be according to the `device_map`. If `valid_torch_device` is `True`,
then if the device is `"disk"`, `"cpu"` will be returned instead."""
device = expand_device_map(device_map, [param_name])[param_name]
if valid_torch_device and device == "disk":
return "cpu"
return device
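# Illustrative usage (hypothetical map, for exposition only):
#   get_device({"model.layers.0": 0, "": "disk"}, "lm_head.weight", valid_torch_device=True)   # -> "cpu"
# because "disk" is not a valid torch device and is remapped to "cpu" for the actual tensor operations.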
def accelerate_disk_offload(
model: "PreTrainedModel",
disk_offload_folder: str | None,
checkpoint_files: list[str] | None,
device_map: dict,
sharded_metadata: dict | None,
dtype: torch.dtype | None,
weight_mapping=None,
):
"""
Prepare the `disk_offload_index` that will be used for reading offloaded parameters. If reading from a safetensors
file, parameters which do not need any special WeightConverter operation during loading (i.e. they are used as-is, or only
renamed) will be mapped to where they already reside on disk. Otherwise, the parameters will be resaved inside
`disk_offload_folder` during loading.
"""
from ..core_model_loading import WeightRenaming, rename_source_key
if disk_offload_folder is not None:
os.makedirs(disk_offload_folder, exist_ok=True)
is_offloaded_safetensors = checkpoint_files is not None and checkpoint_files[0].endswith(".safetensors")
renamings = []
if weight_mapping is not None:
renamings = [entry for entry in weight_mapping if isinstance(entry, WeightRenaming)]
    # In this case, the offload index simply points to the existing safetensors files (except if a custom weight
    # loading operation is used, e.g. for MoE models, where we need to resave the weights that were changed at loading time)
if is_offloaded_safetensors:
meta_state_dict = model.state_dict()
param_device_map = expand_device_map(device_map, meta_state_dict.keys())
str_dtype = str(dtype).replace("torch.", "") if dtype is not None else "float32"
if sharded_metadata is None:
weight_map = dict.fromkeys(safe_open(checkpoint_files[0], framework="pt").keys(), checkpoint_files[0])
else:
folder = os.path.sep.join(checkpoint_files[0].split(os.path.sep)[:-1])
weight_map = {k: os.path.join(folder, v) for k, v in sharded_metadata["weight_map"].items()}
# Update the weight names according to the `weight_mapping`
weight_renaming_map = {
rename_source_key(k, renamings, [], model.base_model_prefix, meta_state_dict)[0]: k for k in weight_map
}
# Prepare the index using existing safetensors files
disk_offload_index = {
target_name: {
"safetensors_file": weight_map[source_name],
"weight_name": source_name,
"dtype": str_dtype,
}
for target_name, source_name in weight_renaming_map.items()
# Need to check if it's in the mapping in case of unexpected keys that would result in KeyError (we skip them)
if target_name in param_device_map and param_device_map[target_name] == "disk"
}
# In this case we will resave every offloaded weight
else:
disk_offload_index = {}
return disk_offload_index
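# Illustrative shape of the returned index (hypothetical file and parameter names, for exposition only):
#   {"model.layers.31.mlp.up_proj.weight": {
#        "safetensors_file": "/path/to/model-00003-of-00003.safetensors",
#        "weight_name": "model.layers.31.mlp.up_proj.weight",
#        "dtype": "float16"}}
# i.e. offloaded parameters point back to their original safetensors shard whenever no conversion is needed.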
def offload_weight(weight: torch.Tensor, weight_name: str, offload_folder: str | None, offload_index: dict) -> dict:
"""Write `weight` to disk inside `offload_folder`, and update `offload_index` accordingly. Everything is
saved in `safetensors` format."""
if offload_folder is None:
raise ValueError(
"The current `device_map` had weights offloaded to the disk, which needed to be re-saved. This is either "
"because the weights are not in `safetensors` format, or because the model uses an internal weight format "
"different than the one saved (i.e. most MoE models). Please provide an `offload_folder` for them in "
"`from_pretrained`."
)
# Write the weight to disk
safetensor_file = os.path.join(offload_folder, f"{weight_name}.safetensors")
save_file({weight_name: weight}, safetensor_file)
# Update the offloading index
str_dtype = str(weight.dtype).replace("torch.", "")
offload_index[weight_name] = {"safetensors_file": safetensor_file, "weight_name": weight_name, "dtype": str_dtype}
return offload_index
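# Illustrative usage (a minimal, self-contained sketch with a dummy tensor and a hypothetical folder):
#   import torch
#   index = offload_weight(torch.ones(2, 2), "dummy.weight", "/tmp/offload", {})
#   # index["dummy.weight"] -> {"safetensors_file": "/tmp/offload/dummy.weight.safetensors",
#   #                           "weight_name": "dummy.weight", "dtype": "float32"}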
def load_offloaded_parameter(model: "PreTrainedModel", param_name: str) -> torch.Tensor:
"""Load `param_name` from disk, if it was offloaded due to the device_map, and thus lives as a meta parameter
inside `model`.
This is needed when resaving a model, when some parameters were offloaded (we need to load them from disk, to
then resave them to disk in the correct shard...)."""
# Start from the most inner module, and try to find the hook that was used for offloading the param
module_parts = param_name.split(".")
modules_to_check = [".".join(module_parts[:-idx]) for idx in range(1, len(module_parts))] + [""]
for parent_name in modules_to_check:
parent = model.get_submodule(parent_name)
if hasattr(parent, "_hf_hook"):
weights_map = parent._hf_hook.weights_map
truncated_param_name = param_name.replace(f"{parent_name}." if parent_name != "" else parent_name, "")
break
# If we did not break the loop, something is wrong
else:
raise ValueError(
f"{param_name} is on the meta device because it was offloaded, but we could not find "
"the corresponding hook for it"
)
# This call loads it from disk
tensor = weights_map[truncated_param_name]
return tensor
def _init_infer_auto_device_map(
model: nn.Module,
max_memory: dict[int | str, int | str] | None = None,
no_split_module_classes: list[str] | None = None,
tied_parameters: list[list[str]] | None = None,
hf_quantizer: "HfQuantizer | None" = None,
) -> tuple[
list[int | str],
dict[int | str, int | str],
list[int | str],
list[int],
dict[str, int],
list[list[str]],
list[str],
list[tuple[str, nn.Module]],
]:
"""
Initialize variables required for computing the device map for model allocation.
"""
max_memory = get_max_memory(max_memory)
if no_split_module_classes is None:
no_split_module_classes = []
elif not isinstance(no_split_module_classes, (list, tuple)):
no_split_module_classes = [no_split_module_classes]
devices = list(max_memory.keys())
if "disk" not in devices:
devices.append("disk")
gpus = [device for device in devices if device not in ["cpu", "disk"]]
# Devices that need to keep space for a potential offloaded layer.
if "mps" in gpus:
main_devices = ["mps"]
elif len(gpus) > 0:
main_devices = [gpus[0], "cpu"]
else:
main_devices = ["cpu"]
module_sizes, _ = compute_module_sizes(model, hf_quantizer, only_modules=False)
if tied_parameters is None:
if len(model.all_tied_weights_keys) > 0:
# create a list of list of tied params based on unique tied groups
groups = set(model.all_tied_weights_keys.values())
tied_parameters = [
sorted([k for k, v in model.all_tied_weights_keys.items() if v == target] + [target])
for target in groups
]
else:
tied_parameters = [[]]
# Direct submodules and parameters
modules_to_treat = (
list(model.named_parameters(recurse=False))
+ list(model.named_children())
+ list(model.named_buffers(recurse=False))
)
return (
devices,
max_memory,
main_devices,
gpus,
module_sizes,
tied_parameters,
no_split_module_classes,
modules_to_treat,
)
def infer_auto_device_map(
model: nn.Module,
max_memory: dict[int | str, int | str] | None = None,
no_split_module_classes: list[str] | None = None,
verbose: bool = False,
clean_result: bool = True,
offload_buffers: bool = False,
tied_parameters: list[list[str]] | None = None,
hf_quantizer: "HfQuantizer | None" = None,
):
"""
Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,
such that:
    - we don't exceed the memory available on any of the GPUs.
    - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that
      has the largest size.
    - if offload to the CPU is needed, we don't exceed the RAM available on the CPU.
    - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
      that has the largest size.
<Tip>
All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
meta device (as it would if initialized within the `init_empty_weights` context manager).
</Tip>
Args:
model (`torch.nn.Module`):
The model to analyze.
max_memory (`Dict`, *optional*):
            A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory
            available if unset. Example: `max_memory={0: "1GB"}`.
        no_split_module_classes (`List[str]`, *optional*):
            A list of layer class names that should never be split across devices (for instance any layer that has a
            residual connection).
verbose (`bool`, *optional*, defaults to `False`):
Whether or not to provide debugging statements as the function builds the device_map.
clean_result (`bool`, *optional*, defaults to `True`):
Clean the resulting device_map by grouping all submodules that go on the same device together.
offload_buffers (`bool`, *optional*, defaults to `False`):
In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
well as the parameters.
"""
# Initialize the variables
(
devices,
max_memory,
main_devices,
gpus,
module_sizes,
tied_parameters,
no_split_module_classes,
modules_to_treat,
) = _init_infer_auto_device_map(model, max_memory, no_split_module_classes, tied_parameters, hf_quantizer)
device_map = OrderedDict()
current_device = 0
device_memory_used = dict.fromkeys(devices, 0)
device_buffer_sizes = {}
device_minimum_assignment_memory = {}
# Initialize maximum largest layer, to know which space to keep in memory
max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes)
    # Ready? This is going to be a bit messy.
while len(modules_to_treat) > 0:
name, module = modules_to_treat.pop(0)
if verbose:
print(f"\nTreating module {name}.")
        # The max layer size among the remaining layers may have changed since we popped one, so we may need to update it.
max_layer_names = [n for n in max_layer_names if n != name and not n.startswith(name + ".")]
if len(max_layer_names) == 0:
max_layer_size, max_layer_names = get_max_layer_size(
[(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)],
module_sizes,
no_split_module_classes,
)
# Assess size needed
module_size = module_sizes[name]
        # We keep relevant tied parameters only: at least one of the tied parameters in the group is inside the
        # current module and at least one is not.
        # Note: if we are currently processing the name `compute.weight`, another parameter named
        # e.g. `compute.weight_submodule.parameter` needs to be considered outside the current module,
        # hence the check with the additional dots.
tied_param_groups = [
tied_group
for tied_group in tied_parameters
if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group)
]
if verbose and len(tied_param_groups) > 0:
print(f" Found the relevant tied param groups {tied_param_groups}")
# Then we keep track of all the parameters that are tied to the current module, but not in the current module
tied_params = sum(
[[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_groups], []
)
if verbose and len(tied_params) > 0:
print(f" So those parameters need to be taken into account {tied_params}")
device = devices[current_device]
current_max_size = max_memory[device] if device != "disk" else None
current_memory_reserved = 0
# Reduce max size available by the largest layer.
if devices[current_device] in main_devices:
current_max_size = current_max_size - max_layer_size
current_memory_reserved = max_layer_size
module_size_with_ties, tied_module_names, tied_modules = get_module_size_with_ties(
tied_params, module_size, module_sizes, modules_to_treat
)
# The module and its tied modules fit on the current device.
if current_max_size is None or device_memory_used[device] + module_size_with_ties <= current_max_size:
if verbose:
output = f"Putting {name}"
if tied_module_names:
output += f" and {tied_module_names}"
else:
output += f" (size={module_size})"
if current_max_size is not None:
output += f" (available={current_max_size - device_memory_used[device]})"
output += f" on {device}."
print(output)
device_memory_used[device] += module_size_with_ties
# Assign the primary module to the device.
device_map[name] = device
# Assign tied modules if any.
for tied_module_name in tied_module_names:
if tied_module_name in [m[0] for m in modules_to_treat]:
# Find the index of the tied module in the list
tied_module_index = next(i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name)
# Remove the tied module from the list to prevent reprocessing
modules_to_treat.pop(tied_module_index)
# Assign the tied module to the device
device_map[tied_module_name] = device
# Buffer Handling
if not offload_buffers and isinstance(module, nn.Module):
# Compute the total buffer size for the module
current_buffer_size = compute_module_total_buffer_size(module, hf_quantizer)
# Update the buffer size on the device
device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size
continue
# The current module itself fits, so we try to split the tied modules.
if len(tied_params) > 0 and device_memory_used[device] + module_size <= current_max_size:
            # Can we split one of the tied modules to make it smaller, or do we need to move on to the next device?
if verbose:
print(
f"Not enough space on {devices[current_device]} to put {name} and {tied_module_names} (space "
f"available {current_max_size - device_memory_used[device]}, needed size {module_size_with_ties})."
)
split_happened = False
for tied_module_name, tied_module in zip(tied_module_names, tied_modules):
tied_module_children = list(tied_module.named_children())
if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes:
# can't break this one.
continue
if verbose:
print(f"Splitting {tied_module_name}.")
tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/__init__.py | src/transformers/integrations/__init__.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_torch_greater_or_equal
_import_structure = {
"aqlm": ["replace_with_aqlm_linear"],
"awq": [
"post_init_awq_exllama_modules",
"post_init_awq_ipex_modules",
"replace_quantization_scales",
"replace_with_awq_linear",
],
"bitnet": [
"BitLinear",
"pack_weights",
"replace_with_bitnet_linear",
"unpack_weights",
],
"bitsandbytes": [
"Bnb4bitQuantize",
"dequantize_and_replace",
"replace_with_bnb_linear",
"validate_bnb_backend_availability",
],
"deepspeed": [
"HfDeepSpeedConfig",
"HfTrainerDeepSpeedConfig",
"deepspeed_config",
"deepspeed_init",
"deepspeed_load_checkpoint",
"deepspeed_optim_sched",
"is_deepspeed_available",
"is_deepspeed_zero3_enabled",
"set_hf_deepspeed_config",
"unset_hf_deepspeed_config",
],
"eetq": ["replace_with_eetq_linear"],
"fbgemm_fp8": ["FbgemmFp8Linear", "FbgemmFp8Llama4TextExperts", "replace_with_fbgemm_fp8_linear"],
"finegrained_fp8": ["FP8Linear", "replace_with_fp8_linear"],
"fsdp": ["is_fsdp_enabled", "is_fsdp_managed_module"],
"ggml": [
"GGUF_CONFIG_DEFAULTS_MAPPING",
"GGUF_CONFIG_MAPPING",
"GGUF_TOKENIZER_MAPPING",
"_gguf_parse_value",
"load_dequant_gguf_tensor",
"load_gguf",
],
"higgs": [
"HiggsLinear",
"dequantize_higgs",
"quantize_with_higgs",
"replace_with_higgs_linear",
],
"hqq": ["prepare_for_hqq_linear"],
"hub_kernels": [
"LayerRepository",
"register_kernel_mapping",
"replace_kernel_forward_from_hub",
"use_kernel_forward_from_hub",
"use_kernel_func_from_hub",
"use_kernelized_func",
],
"integration_utils": [
"INTEGRATION_TO_CALLBACK",
"AzureMLCallback",
"ClearMLCallback",
"CodeCarbonCallback",
"CometCallback",
"DagsHubCallback",
"DVCLiveCallback",
"FlyteCallback",
"MLflowCallback",
"NeptuneCallback",
"NeptuneMissingConfiguration",
"SwanLabCallback",
"TensorBoardCallback",
"TrackioCallback",
"WandbCallback",
"get_available_reporting_integrations",
"get_reporting_integration_callbacks",
"hp_params",
"is_azureml_available",
"is_clearml_available",
"is_codecarbon_available",
"is_comet_available",
"is_dagshub_available",
"is_dvclive_available",
"is_flyte_deck_standard_available",
"is_flytekit_available",
"is_mlflow_available",
"is_neptune_available",
"is_optuna_available",
"is_ray_available",
"is_ray_tune_available",
"is_swanlab_available",
"is_tensorboard_available",
"is_trackio_available",
"is_wandb_available",
"rewrite_logs",
"run_hp_search_optuna",
"run_hp_search_ray",
"run_hp_search_wandb",
],
"mxfp4": [
"Mxfp4GptOssExperts",
"convert_moe_packed_tensors",
"dequantize",
"load_and_swizzle_mxfp4",
"quantize_to_mxfp4",
"replace_with_mxfp4_linear",
"swizzle_mxfp4",
],
"peft": ["PeftAdapterMixin"],
"quanto": ["replace_with_quanto_layers"],
"spqr": ["replace_with_spqr_linear"],
"vptq": ["replace_with_vptq_linear"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["executorch"] = [
"TorchExportableModuleWithStaticCache",
"convert_and_export_with_cache",
]
try:
if not is_torch_greater_or_equal("2.3"):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tensor_parallel"] = [
"shard_and_distribute_module",
"ALL_PARALLEL_STYLES",
"translate_to_torch_parallel_style",
]
try:
if not is_torch_greater_or_equal("2.5"):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["flex_attention"] = [
"make_flex_block_causal_mask",
]
if TYPE_CHECKING:
from .aqlm import replace_with_aqlm_linear
from .awq import (
post_init_awq_exllama_modules,
post_init_awq_ipex_modules,
replace_quantization_scales,
replace_with_awq_linear,
)
from .bitnet import (
BitLinear,
pack_weights,
replace_with_bitnet_linear,
unpack_weights,
)
from .bitsandbytes import (
Bnb4bitQuantize,
dequantize_and_replace,
replace_with_bnb_linear,
validate_bnb_backend_availability,
)
from .deepspeed import (
HfDeepSpeedConfig,
HfTrainerDeepSpeedConfig,
deepspeed_config,
deepspeed_init,
deepspeed_load_checkpoint,
deepspeed_optim_sched,
is_deepspeed_available,
is_deepspeed_zero3_enabled,
set_hf_deepspeed_config,
unset_hf_deepspeed_config,
)
from .eetq import replace_with_eetq_linear
from .fbgemm_fp8 import FbgemmFp8Linear, FbgemmFp8Llama4TextExperts, replace_with_fbgemm_fp8_linear
from .finegrained_fp8 import FP8Linear, replace_with_fp8_linear
from .fsdp import is_fsdp_enabled, is_fsdp_managed_module
from .ggml import (
GGUF_CONFIG_DEFAULTS_MAPPING,
GGUF_CONFIG_MAPPING,
GGUF_TOKENIZER_MAPPING,
_gguf_parse_value,
load_dequant_gguf_tensor,
load_gguf,
)
from .higgs import HiggsLinear, dequantize_higgs, quantize_with_higgs, replace_with_higgs_linear
from .hqq import prepare_for_hqq_linear
from .hub_kernels import (
LayerRepository,
register_kernel_mapping,
replace_kernel_forward_from_hub,
use_kernel_forward_from_hub,
use_kernel_func_from_hub,
use_kernelized_func,
)
from .integration_utils import (
INTEGRATION_TO_CALLBACK,
AzureMLCallback,
ClearMLCallback,
CodeCarbonCallback,
CometCallback,
DagsHubCallback,
DVCLiveCallback,
FlyteCallback,
MLflowCallback,
NeptuneCallback,
NeptuneMissingConfiguration,
SwanLabCallback,
TensorBoardCallback,
TrackioCallback,
WandbCallback,
get_available_reporting_integrations,
get_reporting_integration_callbacks,
hp_params,
is_azureml_available,
is_clearml_available,
is_codecarbon_available,
is_comet_available,
is_dagshub_available,
is_dvclive_available,
is_flyte_deck_standard_available,
is_flytekit_available,
is_mlflow_available,
is_neptune_available,
is_optuna_available,
is_ray_available,
is_ray_tune_available,
is_swanlab_available,
is_tensorboard_available,
is_trackio_available,
is_wandb_available,
rewrite_logs,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_wandb,
)
from .mxfp4 import (
Mxfp4GptOssExperts,
dequantize,
load_and_swizzle_mxfp4,
quantize_to_mxfp4,
replace_with_mxfp4_linear,
swizzle_mxfp4,
)
from .peft import PeftAdapterMixin
from .quanto import replace_with_quanto_layers
from .spqr import replace_with_spqr_linear
from .vptq import replace_with_vptq_linear
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .executorch import TorchExportableModuleWithStaticCache, convert_and_export_with_cache
try:
if not is_torch_greater_or_equal("2.3"):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tensor_parallel import (
ALL_PARALLEL_STYLES,
shard_and_distribute_module,
translate_to_torch_parallel_style,
)
try:
if not is_torch_greater_or_equal("2.5"):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .flex_attention import make_flex_block_causal_mask
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
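# Illustrative usage (a sketch for exposition only): thanks to the `_LazyModule` indirection above, names listed
# in `_import_structure` are only imported on first access, e.g.
#   from transformers.integrations import is_wandb_available   # triggers the lazy import of `integration_utils`
#   is_wandb_available()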
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/ggml.py | src/transformers/integrations/ggml.py | # coding=utf-8
# Copyright 2024 The ggml.ai team and The HuggingFace Inc. team. and pygguf author (github.com/99991)
# https://github.com/99991/pygguf
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration with GGML / The file is copied and adapted from https://github.com/99991/pygguf
with extra methods being exposed
"""
from array import array
import numpy as np
from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors
from tokenizers.models import BPE, Unigram
from .. import AddedToken
from ..convert_slow_tokenizer import GemmaConverter, GPT2Converter, LlamaConverter, Qwen2Converter, T5Converter
from ..utils import logging
from ..utils.logging import tqdm
logger = logging.get_logger(__name__)
GGUF_CONFIG_MAPPING = {
"general": {
"architecture": "model_type",
"name": "_model_name_or_path",
},
"llama": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
# NOTE: rope.dimension_count==head_dim only suitable for llama/mistral
"rope.dimension_count": "head_dim",
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
},
"mistral": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
# NOTE: rope.dimension_count==head_dim only suitable for llama/mistral
"rope.dimension_count": "head_dim",
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
},
"qwen2": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
},
"qwen2_moe": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
"expert_count": "num_experts",
"expert_used_count": "num_experts_per_tok",
},
"lfm2": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
"shortconv.l_cache": "conv_L_cache",
},
"qwen3": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
},
"qwen3_moe": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.key_length": "head_dim",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
"expert_count": "num_experts",
"expert_used_count": "num_experts_per_tok",
},
"falcon": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
},
"tokenizer": {
"ggml.bos_token_id": "bos_token_id",
"ggml.eos_token_id": "eos_token_id",
"ggml.unknown_token_id": "unk_token_id",
"ggml.padding_token_id": "pad_token_id",
},
"phi3": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
},
"bloom": {
"block_count": "n_layer",
"embedding_length": "hidden_size",
"attention.head_count": "n_head",
"vocab_size": "vocab_size",
"attention.layer_norm_epsilon": "layer_norm_epsilon",
},
"t5": {
"context_length": "n_positions",
"block_count": "num_layers",
"feed_forward_length": "d_ff",
"embedding_length": "d_model",
"attention.key_length": "d_kv",
"attention.head_count": "num_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_epsilon": "layer_norm_epsilon",
"attention.relative_buckets_count": "relative_attention_num_buckets",
"decoder_start_token_id": "decoder_start_token_id",
"vocab_size": "vocab_size",
},
"stablelm": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_epsilon": "layer_norm_eps",
"vocab_size": "vocab_size",
},
"gpt2": {
"block_count": "n_layer",
"context_length": "n_ctx",
"embedding_length": "n_embd",
"feed_forward_length": "feed_forward_length",
"attention.head_count": "n_head",
"attention.layer_norm_epsilon": "layer_norm_epsilon",
},
"starcoder2": {
"block_count": "num_hidden_layers",
"context_length": "max_position_embeddings",
"embedding_length": "hidden_size",
"feed_forward_length": "intermediate_size",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_epsilon": "norm_epsilon",
},
"mamba": {
"vocab_size": "vocab_size",
"context_length": "max_position_embeddings",
"embedding_length": "hidden_size",
"attention.layer_norm_rms_epsilon": "layer_norm_epsilon",
"block_count": "num_hidden_layers",
"ssm.conv_kernel": "conv_kernel",
"ssm.state_size": "state_size",
"ssm.time_step_rank": "time_step_rank",
"ssm.inner_size": "intermediate_size",
},
"nemotron": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "norm_eps",
"vocab_size": "vocab_size",
},
"gemma2": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
# NOTE: Gemma2 has key_length==value_length==head_dim
# See: https://github.com/ggerganov/llama.cpp/blob/2e2f8f093cd4fb6bbb87ba84f6b9684fa082f3fa/convert_hf_to_gguf.py#L3293-L3294
"attention.key_length": "head_dim",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"attention.sliding_window": "sliding_window",
"vocab_size": "vocab_size",
},
"gemma3": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
# NOTE: Gemma3 has key_length==value_length==head_dim
# See: https://github.com/ggml-org/llama.cpp/blob/fe5b78c89670b2f37ecb216306bed3e677b49d9f/convert_hf_to_gguf.py#L3495-L3496
"attention.key_length": "head_dim",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"attention.sliding_window": "sliding_window",
"vocab_size": "vocab_size",
},
"umt5": {
"context_length": "n_positions",
"block_count": "num_layers",
"feed_forward_length": "d_ff",
"embedding_length": "d_model",
"attention.key_length": "d_kv",
"attention.head_count": "num_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_epsilon": "layer_norm_epsilon",
"attention.relative_buckets_count": "relative_attention_num_buckets",
"decoder_start_token_id": "decoder_start_token_id",
"vocab_size": "vocab_size",
},
"deci": {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.dimension_count": None,
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
},
}
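# Illustrative lookup (for exposition only): the architecture table above maps GGUF metadata field names to
# `transformers` config attributes, for example for a llama GGUF file:
#   GGUF_CONFIG_MAPPING["llama"]["attention.head_count"]   # -> "num_attention_heads"
#   GGUF_CONFIG_MAPPING["llama"]["context_length"]         # -> "max_position_embeddings"
# A `None` value (e.g. "rope.dimension_count" for qwen2) marks a GGUF field with no direct config counterpart.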
GGUF_TOKENIZER_MAPPING = {
"tokenizer": {
"ggml.model": "tokenizer_type",
"ggml.tokens": "tokens",
"ggml.scores": "scores",
"ggml.token_type": "token_type",
"ggml.merges": "merges",
"ggml.bos_token_id": "bos_token_id",
"ggml.eos_token_id": "eos_token_id",
"ggml.unknown_token_id": "unk_token_id",
"ggml.padding_token_id": "pad_token_id",
"ggml.add_space_prefix": "add_prefix_space",
},
"tokenizer_config": {
"chat_template": "chat_template",
"ggml.model": "model_type",
"ggml.bos_token_id": "bos_token_id",
"ggml.eos_token_id": "eos_token_id",
"ggml.unknown_token_id": "unk_token_id",
"ggml.padding_token_id": "pad_token_id",
},
}
# We only need to set here the parameters whose defaults differ between transformers and llama.cpp.
GGUF_CONFIG_DEFAULTS_MAPPING = {
"qwen3_moe": {
# NOTE: Qwen3MoeConfig defaults to false but llama.cpp needs this to be true.
# See: https://github.com/ggml-org/llama.cpp/blob/17f7f4baad8b3a716ee139da7bb56ae984e8c0fa/src/models/qwen3moe.cpp#L85-L96
# (the parameter right after LLM_FFN_SILU corresponds to norm_topk_prob)
"norm_topk_prob": True,
},
}
def _gguf_parse_value(_value, data_type):
if not isinstance(data_type, list):
data_type = [data_type]
if len(data_type) == 1:
data_type = data_type[0]
array_data_type = None
else:
if data_type[0] != 9:
raise ValueError("Received multiple types, therefore expected the first type to indicate an array.")
data_type, array_data_type = data_type
if data_type in [0, 1, 2, 3, 4, 5, 10, 11]:
_value = int(_value[0])
elif data_type in [6, 12]:
_value = float(_value[0])
elif data_type == 7:
_value = bool(_value[0])
elif data_type == 8:
_value = array("B", list(_value)).tobytes().decode()
elif data_type == 9:
_value = _gguf_parse_value(_value, array_data_type)
return _value
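# Illustrative usage (a sketch for exposition only): GGUF value types are numeric codes; 8 is a string stored
# as raw bytes, and a [9, <code>] pair denotes an array of that element type.
#   _gguf_parse_value(list(b"llama"), 8)   # -> "llama"
#   _gguf_parse_value([4096], 4)           # -> 4096 (code 4 is uint32)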
class GGUFTokenizerSkeleton:
def __init__(self, dict_):
for k, v in dict_.items():
setattr(self, k, v)
if not hasattr(self, "merges"):
if not hasattr(self, "tokens") or not hasattr(self, "scores"):
raise ValueError(
"tokens and scores need to be passed for a LLaMa tokenizer without merges to be instantiated."
)
tokens = self.tokens
scores = self.scores
vocab = {t: scores[i] for i, t in enumerate(tokens)}
logger.warning("Merges were not in checkpoint, building merges on the fly.")
merges = []
for merge, piece_score in tqdm(vocab.items()):
local = []
for index in range(1, len(merge)):
piece_l, piece_r = merge[:index], merge[index:]
if piece_l in tokens and piece_r in tokens:
local.append((piece_l, piece_r, piece_score))
local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]), reverse=True)
merges.extend(local)
merges = sorted(merges, key=lambda val: val[2], reverse=True)
merges = [(val[0], val[1]) for val in merges]
self.merges = merges
else:
self.merges = [tuple(merge.split(" ")) for merge in self.merges]
if not hasattr(self, "scores"):
self.scores = [None for _ in range(len(self.tokens))]
if not hasattr(self, "added_tokens"):
self.added_tokens = []
if not hasattr(self, "unk_token_id"):
self.unk_token_id = None
# Llama2 uses the field `unknown_token_id`
if hasattr(self, "unknown_token_id") and self.unk_token_id is None:
self.unk_token_id = self.unknown_token_id
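# Illustrative note (hand-built dict with hypothetical tokens, for exposition only): GGUF stores BPE merges as
# single space-separated strings; the skeleton splits them into pairs for the fast tokenizer, e.g.
#   GGUFTokenizerSkeleton({"tokens": ["a", "b", "ab"], "merges": ["a b"]}).merges   # -> [("a", "b")]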
class GGUFLlamaConverter(LlamaConverter):
def __init__(self, tokenizer_dict):
self.proto = GGUFTokenizerSkeleton(tokenizer_dict)
self.original_tokenizer = self.proto
self.additional_kwargs = {}
self.is_llama_3_tokenizer = getattr(self.proto, "tokenizer_type", "llama") != "llama"
def vocab(self, proto):
return list(zip(proto.tokens, proto.scores))
def merges(self, proto):
return proto.merges
def tokenizer(self, proto):
vocab_scores = self.vocab(self.proto)
merges = self.merges(self.proto)
bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
unk_token = proto.tokens[proto.unk_token_id] if proto.unk_token_id is not None else None
bos_token = proto.tokens[proto.bos_token_id] if getattr(proto, "bos_token_id", None) is not None else None
        eos_token = proto.tokens[proto.eos_token_id] if getattr(proto, "eos_token_id", None) is not None else None
tokenizer = Tokenizer(
BPE(
bpe_vocab,
merges,
unk_token=unk_token,
fuse_unk=True,
byte_fallback=True,
)
)
special_tokens = []
if not hasattr(self.proto, "token_type"):
if unk_token is not None:
special_tokens.append(AddedToken(unk_token, normalized=False, special=True))
if bos_token is not None:
special_tokens.append(AddedToken(bos_token, normalized=False, special=True))
if eos_token is not None:
special_tokens.append(AddedToken(eos_token, normalized=False, special=True))
else:
# 3 stands for special tokens
special_tokens_idx = np.where(np.array(self.proto.token_type) == 3)[0]
for idx in special_tokens_idx:
special_tokens.append(AddedToken(self.proto.tokens[idx], normalized=False, special=True))
if len(special_tokens) != 0:
tokenizer.add_special_tokens(special_tokens)
if len(self.proto.added_tokens) != 0:
tokenizer.add_tokens(
[AddedToken(added_token, normalized=False, special=False) for added_token in self.proto.added_tokens]
)
self.additional_kwargs["unk_token"] = unk_token
self.additional_kwargs["eos_token"] = bos_token
self.additional_kwargs["bos_token"] = eos_token
if self.is_llama_3_tokenizer:
self.additional_kwargs["add_prefix_space"] = None
self.additional_kwargs["clean_up_tokenization_spaces"] = True
self.additional_kwargs["legacy"] = False
self.original_tokenizer.legacy = False
return tokenizer
def decoder(self, replacement, add_prefix_space):
sequence = [
decoders.ByteFallback(),
decoders.Fuse(),
decoders.Replace("▁", " "),
]
if self.is_llama_3_tokenizer:
sequence += [decoders.ByteLevel(add_prefix_space=False, trim_offsets=False, use_regex=True)]
if add_prefix_space:
sequence += [decoders.Strip(content=" ", left=1)]
return decoders.Sequence(sequence)
def converted(self):
# Copied partly from converted method in SpmConverter class
tokenizer = self.tokenizer(self.proto)
# Tokenizer assemble
normalizer = self.normalizer(self.proto)
if normalizer is not None:
tokenizer.normalizer = normalizer
replacement = "▁"
add_prefix_space = True
if hasattr(self.original_tokenizer, "add_prefix_space"):
add_prefix_space = self.original_tokenizer.add_prefix_space
pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space)
if pre_tokenizer is not None:
tokenizer.pre_tokenizer = pre_tokenizer
tokenizer.decoder = self.decoder(replacement, add_prefix_space)
post_processor = self.post_processor()
if post_processor:
tokenizer.post_processor = post_processor
# HACK: patch the llama-3 tokenizer to use the corresponding pre-tokenizer
# and normalizer
if self.is_llama_3_tokenizer:
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(
add_prefix_space=False, trim_offsets=False, use_regex=True
)
# This is tricky as the additional kwargs are passed after legacy is force-set in LlamaTokenizer's
# init.
tokenizer.normalizer = normalizers.Sequence([])
return tokenizer
class GGUFQwen2Converter(Qwen2Converter):
def __init__(self, tokenizer_dict):
self.original_tokenizer = GGUFTokenizerSkeleton(tokenizer_dict)
self.additional_kwargs = {}
def converted(self) -> Tokenizer:
vocab = {word: i for i, word in enumerate(self.original_tokenizer.tokens)}
merges = self.original_tokenizer.merges
tokenizer = super().converted(vocab, merges)
tokenizer.add_special_tokens(
[
AddedToken("<|endoftext|>", normalized=False, special=True),
AddedToken("<|im_start|>", normalized=False, special=True),
AddedToken("<|im_end|>", normalized=False, special=True),
]
)
return tokenizer
class GGUFPhi3Converter(LlamaConverter):
def __init__(self, tokenizer_dict):
self.proto = GGUFTokenizerSkeleton(tokenizer_dict)
self.original_tokenizer = self.proto
self.additional_kwargs = {}
def vocab(self, proto):
return list(zip(proto.tokens, proto.scores))
def merges(self, proto):
return proto.merges
def tokenizer(self, proto):
vocab_scores = self.vocab(self.proto)
merges = self.merges(self.proto)
bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)}
tokenizer = Tokenizer(BPE(bpe_vocab, merges))
# add the special tokens from phi3 tokenizer config
tokenizer.add_special_tokens(
[
AddedToken("</s>", rstrip=True, lstrip=False, normalized=False, special=True),
AddedToken("<|endoftext|>", normalized=False, special=True),
AddedToken("<|assistant|>", rstrip=True, normalized=False, special=True),
AddedToken("<|placeholder1|>", rstrip=True, normalized=False, special=True),
AddedToken("<|placeholder2|>", rstrip=True, normalized=False, special=True),
AddedToken("<|placeholder3|>", rstrip=True, normalized=False, special=True),
AddedToken("<|placeholder4|>", rstrip=True, normalized=False, special=True),
AddedToken("<|system|>", rstrip=True, normalized=False, special=True),
AddedToken("<|end|>", rstrip=True, normalized=False, special=True),
AddedToken("<|placeholder5|>", rstrip=True, normalized=False, special=True),
AddedToken("<|placeholder6|>", rstrip=True, normalized=False, special=True),
AddedToken("<|user|>", rstrip=True, normalized=False, special=True),
]
)
self.additional_kwargs["unk_token"] = (
proto.tokens[proto.unk_token_id] if proto.unk_token_id is not None else None
)
self.additional_kwargs["eos_token"] = (
proto.tokens[proto.eos_token_id] if proto.eos_token_id is not None else None
)
self.additional_kwargs["bos_token"] = (
proto.tokens[proto.bos_token_id] if proto.bos_token_id is not None else None
)
self.additional_kwargs["pad_token"] = (
proto.tokens[proto.pad_token_id] if proto.pad_token_id is not None else None
)
return tokenizer
def decoder(self, replacement, add_prefix_space):
sequence = [
decoders.ByteFallback(),
decoders.Fuse(),
decoders.Replace(replacement, " "),
]
if add_prefix_space:
sequence += [decoders.Strip(content=" ", left=1)]
return decoders.Sequence(sequence)
def converted(self) -> Tokenizer:
tokenizer = self.tokenizer(self.proto)
replacement = "▁"
add_prefix_space = True
if hasattr(self.original_tokenizer, "add_prefix_space"):
add_prefix_space = self.original_tokenizer.add_prefix_space
tokenizer.decoder = self.decoder(replacement, add_prefix_space)
return tokenizer
class GGUFGPTConverter(GPT2Converter):
def __init__(self, tokenizer_dict):
self.original_tokenizer = GGUFTokenizerSkeleton(tokenizer_dict)
self.additional_kwargs = {}
def converted(self) -> Tokenizer:
vocab = {word: i for i, word in enumerate(self.original_tokenizer.tokens)}
merges = self.original_tokenizer.merges
tokenizer = super().converted(vocab, merges)
return tokenizer
class GGUFT5Converter(T5Converter):
def __init__(self, tokenizer_dict):
# set dummy data to avoid unnecessary merges calculation
tokenizer_dict["merges"] = ["dummy text"]
self.proto = GGUFTokenizerSkeleton(tokenizer_dict)
self.token2id = {k: v for v, k in enumerate(self.proto.tokens)}
self.original_tokenizer = self.proto
self.additional_kwargs = {}
def vocab(self, proto):
return list(zip(proto.tokens, proto.scores))
def normalizer(self, proto):
if getattr(self.original_tokenizer, "legacy", True):
sequence = []
if getattr(self.original_tokenizer, "add_prefix_space", True):
sequence += [normalizers.Prepend(prepend="▁")]
sequence += [normalizers.Replace(pattern=" ", content="▁")]
return normalizers.Sequence(sequence)
return None # non-legacy, no normalizer
def post_processor(self):
return processors.TemplateProcessing(
single=["$A", "</s>"],
pair=["$A", "</s>", "$B", "</s>"],
special_tokens=[
("</s>", self.token2id["</s>"]),
],
)
def converted(self) -> Tokenizer:
vocab_scores = self.vocab(self.proto)
tokenizer = Tokenizer(
Unigram(
vocab_scores,
unk_id=self.proto.unk_token_id,
byte_fallback=False,
)
)
# Tokenizer assemble
normalizer = self.normalizer(self.proto)
if normalizer is not None:
tokenizer.normalizer = normalizer
replacement = "▁"
add_prefix_space = True
if hasattr(self.original_tokenizer, "add_prefix_space"):
add_prefix_space = self.original_tokenizer.add_prefix_space
pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space)
if pre_tokenizer is not None:
tokenizer.pre_tokenizer = pre_tokenizer
tokenizer.decoder = self.decoder(replacement, add_prefix_space)
post_processor = self.post_processor()
if post_processor:
tokenizer.post_processor = post_processor
return tokenizer
class GGUFGemmaConverter(GemmaConverter):
def __init__(self, tokenizer_dict):
# set dummy data to avoid unnecessary merges calculation
tokenizer_dict["merges"] = ["dummy text"]
self.proto = GGUFTokenizerSkeleton(tokenizer_dict)
self.original_tokenizer = self.proto
self.additional_kwargs = {}
def vocab(self, proto):
original_vocab = list(zip(proto.tokens, proto.scores))
updated_vocab = []
for token, score in original_vocab:
if token == "<0x09>":
updated_vocab.append(("\t", score))
elif " " in token and len(token.strip()) == 0:
underscores = "▁" * len(token)
updated_vocab.append((underscores, score))
else:
updated_vocab.append((token, score))
return updated_vocab
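    # Illustrative note (for exposition only): whitespace-only tokens from the GGUF vocab are rewritten to runs of
    # "▁" (e.g. a two-space token becomes "▁▁") and the byte token "<0x09>" becomes a literal tab, so that the
    # Unigram model round-trips them correctly.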
def normalizer(self, proto):
return normalizers.Replace(" ", "▁")
def decoder(self, replacement, add_prefix_space):
sequence = [
decoders.Replace("▁", " "),
decoders.ByteFallback(),
decoders.Fuse(),
]
if add_prefix_space:
sequence += [decoders.Strip(content=" ", left=1)]
return decoders.Sequence(sequence)
def converted(self) -> Tokenizer:
vocab_scores = self.vocab(self.proto)
tokenizer = Tokenizer(
Unigram(
vocab_scores,
unk_id=self.proto.unk_token_id,
byte_fallback=self.handle_byte_fallback,
)
)
normalizer = self.normalizer(self.proto)
if normalizer is not None:
tokenizer.normalizer = normalizer
replacement = "▁"
add_prefix_space = True
if hasattr(self.original_tokenizer, "add_prefix_space"):
add_prefix_space = self.original_tokenizer.add_prefix_space
tokenizer.decoder = self.decoder(replacement, add_prefix_space)
pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space)
if pre_tokenizer is not None:
tokenizer.pre_tokenizer = pre_tokenizer
return tokenizer
GGUF_TO_FAST_CONVERTERS = {
"llama": GGUFLlamaConverter,
"qwen2": GGUFQwen2Converter,
"qwen2_moe": GGUFQwen2Converter,
"qwen3": GGUFQwen2Converter,
"qwen3_moe": GGUFQwen2Converter,
"phi3": GGUFPhi3Converter,
"bloom": GGUFGPTConverter,
"falcon": GGUFGPTConverter,
"stablelm": GGUFGPTConverter,
"gpt2": GGUFGPTConverter,
"starcoder2": GGUFGPTConverter,
"t5": GGUFT5Converter,
"mamba": GGUFGPTConverter,
"nemotron": GGUFGPTConverter,
"gemma2": GGUFGemmaConverter,
"gemma3_text": GGUFGemmaConverter,
"umt5": GGUFT5Converter,
"deci": GGUFLlamaConverter,
"decilm": GGUFLlamaConverter,
}
def convert_gguf_tokenizer(architecture: str, tokenizer_dict) -> tuple[Tokenizer, dict]:
"""
    Utility to convert the tokenizer metadata extracted from a gguf file into a fast tokenizer instance.
    Args:
        architecture (`str`): The model architecture derived from the gguf file.
        tokenizer_dict (`dict`):
            The tokenizer metadata extracted from the gguf file, used to build the backend tokenizer for
            [`~tokenization_utils_base.PreTrainedTokenizerFast`].
    Return:
        An instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a
        [`~tokenization_utils_base.PreTrainedTokenizerFast`], along with a dict of additional tokenizer kwargs.
"""
tokenizer_class_name = architecture
converter = GGUF_TO_FAST_CONVERTERS[tokenizer_class_name](tokenizer_dict)
fast_tokenizer = converter.converted()
return fast_tokenizer, converter.additional_kwargs
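# Illustrative usage (a sketch for exposition only): `tokenizer_dict` is normally produced by `load_gguf` rather
# than built by hand; the architecture string simply selects the converter class, e.g.
#   GGUF_TO_FAST_CONVERTERS["qwen2"]   # -> GGUFQwen2Converter
# and `convert_gguf_tokenizer("qwen2", tokenizer_dict)` returns the converted `tokenizers.Tokenizer` plus any
# extra kwargs (unk/bos/eos/pad tokens) to forward to `PreTrainedTokenizerFast`.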
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/deepspeed.py | src/transformers/integrations/deepspeed.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration with Deepspeed
"""
import copy
import importlib.metadata as importlib_metadata
import importlib.util
import weakref
from functools import partialmethod
from ..dependency_versions_check import dep_version_check
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
from torch import nn
logger = logging.get_logger(__name__)
def is_deepspeed_available():
package_exists = importlib.util.find_spec("deepspeed") is not None
    # Check we're not importing a "deepspeed" directory somewhere instead of the actual library, by trying to grab
    # its package metadata.
if package_exists:
try:
_ = importlib_metadata.metadata("deepspeed")
return True
except importlib_metadata.PackageNotFoundError:
return False
if is_accelerate_available() and is_deepspeed_available():
from accelerate.utils.deepspeed import HfDeepSpeedConfig as DeepSpeedConfig
else:
# Inherits from a dummy `object` if accelerate is not available, so that python succeeds to import this file.
# Deepspeed glue code will never inherit this dummy object as it checks if accelerate is available.
from builtins import object as DeepSpeedConfig
class HfDeepSpeedConfig(DeepSpeedConfig):
"""
This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
it's important that this object remains alive while the program is still running.
[`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration
with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic
the DeepSpeed configuration is not modified in any way.
Args:
config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
"""
def __init__(self, config_file_or_dict):
# set global weakref object
set_hf_deepspeed_config(self)
dep_version_check("accelerate")
dep_version_check("deepspeed")
super().__init__(config_file_or_dict)
class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig):
"""
The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has the
same lifespan as the latter.
"""
def __init__(self, config_file_or_dict):
super().__init__(config_file_or_dict)
self._dtype = None
self.mismatches = []
def dtype(self):
if self._dtype is None:
raise ValueError("trainer_config_process() wasn't called yet to tell dtype")
return self._dtype
def is_auto(self, ds_key_long):
val = self.get_value(ds_key_long)
if val is None:
return False
else:
return val == "auto"
def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True):
"""
        A utility method that massages the config file and can optionally verify that the values match.
        1. Replace "auto" values with the `TrainingArguments` value.
        2. If it wasn't "auto" and `must_match` is true, then check that the DS config matches the Trainer
           config value; if mismatched, add the entry to `self.mismatches` - a `ValueError` will be raised during
           `trainer_config_finalize` if there are one or more mismatches.
"""
config, ds_key = self.find_config_node(ds_key_long)
if config is None:
return
if config.get(ds_key) == "auto":
config[ds_key] = hf_val
return
if not must_match:
return
ds_val = config.get(ds_key)
if ds_val is not None and ds_val != hf_val:
self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}")
fill_only = partialmethod(fill_match, must_match=False)
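    # Illustrative behaviour (a hedged sketch assuming deepspeed and accelerate are installed; values hypothetical):
    #   ds = HfTrainerDeepSpeedConfig({"train_micro_batch_size_per_gpu": "auto", "train_batch_size": 8})
    #   ds.fill_match("train_micro_batch_size_per_gpu", 4, "per_device_train_batch_size")
    #   # -> config["train_micro_batch_size_per_gpu"] is now 4 (the "auto" was filled in)
    #   ds.fill_match("train_batch_size", 16, "train_batch_size (calculated)")
    #   # -> ds.mismatches == ["- ds train_batch_size=8 vs hf train_batch_size (calculated)=16"]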
def trainer_config_process(self, args, auto_find_batch_size=False):
"""
Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object
creation.
"""
# DeepSpeed does:
# train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps
train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps
self.fill_match(
"train_micro_batch_size_per_gpu",
args.per_device_train_batch_size,
"per_device_train_batch_size",
not auto_find_batch_size,
)
self.fill_match(
"gradient_accumulation_steps",
args.gradient_accumulation_steps,
"gradient_accumulation_steps",
)
self.fill_match(
"train_batch_size",
train_batch_size,
"train_batch_size (calculated)",
not auto_find_batch_size,
)
self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm")
self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate")
self.fill_match(
"optimizer.params.betas",
[args.adam_beta1, args.adam_beta2],
"adam_beta1+adam_beta2",
)
self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon")
self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay")
self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg
self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate")
# total_num_steps - will get set in trainer_config_finalize
if args.save_on_each_node:
# deepspeed uses shared storage by default. Let's override this setting if save_on_each_node == True
self.config["checkpoint"] = self.config.get("checkpoint", {})
self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node
# amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set
# any here unless the user did the work
self.fill_match("fp16.enabled", (args.fp16 or args.fp16_full_eval), "fp16|fp16_full_eval")
self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval")
# deepspeed's default mode is fp16 unless there is a config that says differently
if self.is_true("bf16.enabled"):
self._dtype = torch.bfloat16
elif self.is_true("fp16.enabled"):
self._dtype = torch.float16
else:
self._dtype = torch.float32
def trainer_config_finalize(self, args, model, num_training_steps):
"""
This stage is run after we have the model and know num_training_steps.
Now we can complete the configuration process.
"""
# zero
# deal with config keys that use `auto` value and rely on model's hidden_size
hidden_size_based_keys = [
"zero_optimization.reduce_bucket_size",
"zero_optimization.stage3_prefetch_bucket_size",
"zero_optimization.stage3_param_persistence_threshold",
]
hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)]
if len(hidden_size_auto_keys) > 0:
hidden_size = None
if hasattr(model, "config"):
if hasattr(model.config, "hidden_size"):
hidden_size = model.config.hidden_size
elif hasattr(model.config, "hidden_sizes"):
# if there are many hidden sizes pick the largest one
hidden_size = max(model.config.hidden_sizes)
elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_size"):
hidden_size = model.config.text_config.hidden_size
elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_sizes"):
# if there are many hidden sizes pick the largest one
hidden_size = max(model.config.text_config.hidden_sizes)
if hidden_size is None:
raise ValueError(
"The model's config file has neither `hidden_size` nor `hidden_sizes` entry, "
"therefore it's not possible to automatically fill out the following `auto` entries "
f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing "
"`auto` values for these keys with an integer value of your choice."
)
self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size)
if self.is_zero3():
# automatically assign the optimal config values based on model config
self.fill_only(
"zero_optimization.stage3_prefetch_bucket_size",
int(0.9 * hidden_size * hidden_size),
)
self.fill_only(
"zero_optimization.stage3_param_persistence_threshold",
10 * hidden_size,
)
# scheduler
self.fill_match(
"scheduler.params.total_num_steps",
num_training_steps,
"num_training_steps (calculated)",
)
self.fill_match(
"scheduler.params.warmup_num_steps",
args.get_warmup_steps(num_training_steps),
"warmup_steps",
)
if len(self.mismatches) > 0:
mismatches = "\n".join(self.mismatches)
raise ValueError(
"Please correct the following DeepSpeed config values that mismatch TrainingArguments"
f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
)
# keep the config object global to be able to access it anywhere during TrainingArguments life-cycle
_hf_deepspeed_config_weak_ref = None
def set_hf_deepspeed_config(hf_deepspeed_config_obj):
# this is a special weakref global object to allow us to get to Deepspeed config from APIs
# that don't have an easy way to get to the Deepspeed config outside of the Trainer domain.
global _hf_deepspeed_config_weak_ref
# will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed)
_hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj)
def unset_hf_deepspeed_config():
# useful for unit tests to ensure the global state doesn't leak - call from `tearDown` method
global _hf_deepspeed_config_weak_ref
_hf_deepspeed_config_weak_ref = None
def is_deepspeed_zero3_enabled():
if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
return _hf_deepspeed_config_weak_ref().is_zero3()
else:
return False
def deepspeed_config():
if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
return _hf_deepspeed_config_weak_ref().config
else:
return None
def _load_state_dict_into_zero3_model(model_to_load, state_dict):
"""
Loads state dict into a model specifically for Zero3, since DeepSpeed does not support the `transformers`
tensor parallelism API.
Nearly identical code to PyTorch's `_load_from_state_dict`
"""
# copy state_dict so `_load_state_dict_into_zero3_model` can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
error_msgs = []
meta_model_state_dict = model_to_load.state_dict()
missing_keys = set(meta_model_state_dict.keys())
prefix_model = getattr(model_to_load, "base_model_prefix", None)
# take care of the case where in the checkpoint we don't have the prefix
state_dict = {
(f"{prefix_model}.{k}" if meta_model_state_dict.get(f"{prefix_model}.{k}") is not None else k): v
for k, v in state_dict.items()
}
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, state_dict, prefix="", assign_to_params_buffers=False):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
local_metadata["assign_to_params_buffers"] = assign_to_params_buffers
args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
# Parameters of module and children will start with prefix. We can exit early if there are none in this
# state_dict
if is_deepspeed_zero3_enabled():
import deepspeed
# In sharded models, each shard has only part of the full state_dict, so only gather
# parameters that are in the current state_dict.
named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False))
params_to_gather = []
for k in named_parameters:
if k in state_dict:
param = named_parameters[k]
# crucial to not init the weight again
param._is_hf_initialized = True
params_to_gather.append(param)
missing_keys.discard(k)
if len(params_to_gather) > 0:
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0):
if torch.distributed.get_rank() == 0:
module._load_from_state_dict(*args)
for name, child in module._modules.items():
if child is not None:
load(child, state_dict, prefix + name + ".", assign_to_params_buffers)
load(model_to_load, state_dict, assign_to_params_buffers=False)
return error_msgs, missing_keys
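# Illustrative usage sketch (not part of the original API): how a caller might consume the two return
# values; `model` and `state_dict` are assumed to be provided by the loading code.
def _example_load_under_zero3(model, state_dict):
    error_msgs, missing_keys = _load_state_dict_into_zero3_model(model, state_dict)
    if error_msgs:
        raise RuntimeError("Error(s) while loading the state dict under ZeRO-3:\n\t" + "\n\t".join(error_msgs))
    return missing_keys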
def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps, model_parameters):
"""
A convenience wrapper that deals with optimizer and lr scheduler configuration.
"""
from accelerate.utils import DummyOptim, DummyScheduler
config = hf_deepspeed_config.config
# Mixing and matching DS schedulers and optimizers is supported unless Offload is enabled in which case it's:
# 1. DS scheduler + DS optimizer: Yes
# 2. HF scheduler + HF optimizer: Mostly*
# 3. DS scheduler + HF optimizer: Mostly*
# 4. HF scheduler + DS optimizer: Yes
#
# Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB)
optimizer = None
if "optimizer" in config:
optimizer = DummyOptim(params=model_parameters)
else:
if hf_deepspeed_config.is_offload():
logger.info(
"Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the"
" custom optimizer has both CPU and GPU implementation (except LAMB)"
)
# ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch.
# But trainer uses AdamW by default.
optimizer = trainer.create_optimizer()
# Using other optimizers requires voiding the warranty with `zero_allow_untested_optimizer`
config["zero_allow_untested_optimizer"] = True
lr_scheduler = None
if "scheduler" in config:
lr_scheduler = DummyScheduler(optimizer)
else:
if isinstance(optimizer, DummyOptim):
def _lr_scheduler_callable(optimizer):
# create a shallow copy first, so later modifications do not affect original trainer
trainer_copy = copy.copy(trainer)
# at the time _lr_scheduler_callable is called, trainer.lr_scheduler has been set
# update it to None so that we can re-create a new scheduler
trainer_copy.lr_scheduler = None
lr_scheduler = trainer_copy.create_scheduler(
num_training_steps=num_training_steps, optimizer=optimizer
)
return lr_scheduler
lr_scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable)
else:
lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
return optimizer, lr_scheduler
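# For reference, a sketch (not part of the original file) of which branch of `deepspeed_optim_sched`
# a given DeepSpeed config selects, following the combination table above:
# - both "optimizer" and "scheduler" in the config -> DummyOptim + DummyScheduler (DeepSpeed owns both)
# - neither in the config -> trainer.create_optimizer() + trainer.create_scheduler()
# - only "scheduler" in the config -> trainer.create_optimizer() + DummyScheduler
# - only "optimizer" in the config -> DummyOptim + DummyScheduler(lr_scheduler_callable=...)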
def deepspeed_init(trainer, num_training_steps, inference=False):
"""
Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args.
If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made.
Args:
trainer: Trainer object
num_training_steps: per single gpu
inference: launch in inference mode (no optimizer and no lr scheduler)
Returns: optimizer, lr_scheduler
We may use `deepspeed_init` more than once during the life of Trainer. When we do, it's a temporary hack based on
https://github.com/deepspeedai/DeepSpeed/issues/1394#issuecomment-937405374 until DeepSpeed fixes a bug where it
can't resume from a checkpoint after it did some stepping: https://github.com/deepspeedai/DeepSpeed/issues/1612
"""
from deepspeed.utils import logger as ds_logger
model = trainer.model
args = trainer.args
hf_deepspeed_config = trainer.accelerator.state.deepspeed_plugin.hf_ds_config
# resume config update - some bits like `model` and `num_training_steps` only become available during train
hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps)
# set the Deepspeed log level consistent with the Trainer
ds_logger.setLevel(args.get_process_log_level())
if inference:
# only Z3 makes sense for the inference
if not hf_deepspeed_config.is_zero3():
raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config")
# in case the training config is re-used for inference
hf_deepspeed_config.del_config_sub_tree("optimizer")
hf_deepspeed_config.del_config_sub_tree("lr_scheduler")
optimizer, lr_scheduler = None, None
model_parameters = None
else:
trainer.optimizer = None # important for when deepspeed_init is used as re-init
deepspeed_tp_size = hf_deepspeed_config.config.get("tensor_parallel", {}).get("autotp_size", 1)
if deepspeed_tp_size > 1:
import deepspeed
model = deepspeed.tp_model_init(
model=model,
tp_size=deepspeed_tp_size,
dtype=hf_deepspeed_config.dtype(),
config=hf_deepspeed_config.config,
)
model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
optimizer, lr_scheduler = deepspeed_optim_sched(
trainer, hf_deepspeed_config, args, num_training_steps, model_parameters
)
# keep for quick debug:
# from pprint import pprint; pprint(config)
return optimizer, lr_scheduler
def deepspeed_load_checkpoint(deepspeed_engine, checkpoint_path, load_module_strict=True):
# it's possible that the user is trying to resume from model_path, which doesn't necessarily
# contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's
# a resume from a checkpoint and not just a local pretrained weight. So we check here if the
# path contains what looks like a deepspeed checkpoint
import glob
deepspeed_checkpoint_dirs = sorted(glob.glob(f"{checkpoint_path}/global_step*"))
if len(deepspeed_checkpoint_dirs) > 0:
logger.info(f"Attempting to resume from {checkpoint_path}")
# this magically updates self.optimizer and self.lr_scheduler
load_path, _ = deepspeed_engine.load_checkpoint(
checkpoint_path,
load_module_strict=load_module_strict,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
if load_path is None:
raise ValueError(f"[deepspeed] failed to resume from checkpoint {checkpoint_path}")
else:
raise ValueError(f"Can't find a valid checkpoint at {checkpoint_path}")
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/higgs.py | src/transformers/integrations/higgs.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"HIGGS through FLUTE (Flexible Lookup Table Engine for LUT-quantized LLMs) integration file"
from math import sqrt
from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_flute_available, is_hadamard_available, is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
if is_flute_available():
from flute.integrations.higgs import prepare_data_transposed
from flute.tune import TuneMetaData, qgemm_v2
if is_hadamard_available():
from fast_hadamard_transform import hadamard_transform
logger = logging.get_logger(__name__)
def pad_to_block(tensor, dims, had_block_size, value=0):
pad_dims = [0 for _ in range(2 * len(tensor.shape))]
for dim in dims:
size = tensor.shape[dim]
next_multiple = ((size - 1) // had_block_size + 1) * had_block_size
delta = next_multiple - size
pad_dims[-2 * dim - 1] = delta
return nn.functional.pad(tensor, pad_dims, "constant", value)
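# Illustrative sketch (not part of the original API) of how `pad_to_block` rounds the selected
# dimensions up to the next multiple of `had_block_size`; the shapes below are arbitrary assumptions.
def _example_pad_to_block():
    x = torch.randn(3, 1000)
    padded = pad_to_block(x, dims=[1], had_block_size=1024)
    # dim 1 is zero-padded from 1000 up to 1024, dim 0 is left untouched
    assert padded.shape == (3, 1024)
    return padded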
def get_higgs_grid(p: int, n: int) -> "torch.Tensor":
if (p, n) == (2, 256):
return torch.tensor(
[
[-2.501467704772949, 0.17954708635807037],
[-0.6761789321899414, 1.2728623151779175],
[-1.8025816679000854, 0.7613157629966736],
[-0.538287878036499, -2.6028504371643066],
[0.8415029644966125, -0.8600977659225464],
[0.7023013234138489, 3.3138747215270996],
[0.5699077844619751, 2.5782253742218018],
[3.292393207550049, -0.6016128063201904],
[0.5561617016792297, -1.7723814249038696],
[-2.1012380123138428, 0.020958125591278076],
[0.46085724234580994, 0.8428705334663391],
[1.4548040628433228, -0.6156039237976074],
[3.210029363632202, 0.3546904921531677],
[0.8893890976905823, -0.5967988967895508],
[0.8618854284286499, -3.2061192989349365],
[1.1360996961593628, -0.23852407932281494],
[1.6646337509155273, -0.9265465140342712],
[1.4767773151397705, 1.2476022243499756],
[-1.0511897802352905, 1.94503915309906],
[-1.56318998336792, -0.3264186680316925],
[-0.1829211413860321, 0.2922491431236267],
[-0.8950616717338562, -1.3887052536010742],
[-0.08206957578659058, -1.329533576965332],
[-0.487422913312912, 1.4817842245101929],
[-1.6769757270812988, -2.8269758224487305],
[-1.5057679414749146, 1.8905963897705078],
[1.8335362672805786, 1.0515104532241821],
[0.3273945450782776, 1.0491033792495728],
[-3.295924186706543, -0.7021600008010864],
[-1.8428784608840942, -1.2315762042999268],
[-0.8575026392936707, -1.7005949020385742],
[-1.120667815208435, 0.6467998027801514],
[-0.1588846743106842, -1.804071068763733],
[-0.8539647459983826, 0.5645008683204651],
[-1.4192019701004028, -0.6175029873847961],
[1.0799058675765991, 1.7871345281600952],
[1.171311855316162, 0.7511613965034485],
[2.162078380584717, 0.8044339418411255],
[1.3969420194625854, -1.243762493133545],
[-0.23818807303905487, 0.053944624960422516],
[2.304199457168579, -1.2667627334594727],
[1.4225027561187744, 0.568610668182373],
[0.376836895942688, -0.7134661674499512],
[2.0404467582702637, 0.4087389409542084],
[0.7639489769935608, -1.1367933750152588],
[0.3622530400753021, -1.4827953577041626],
[0.4100743532180786, 0.36108437180519104],
[-1.5867475271224976, -1.618212342262268],
[-2.2769672870635986, -1.2132309675216675],
[0.9184022545814514, -0.34428009390830994],
[-0.3902314603328705, 0.21785245835781097],
[3.120687484741211, 1.3077973127365112],
[1.587440848350525, -1.6506884098052979],
[-1.718808889389038, -0.038405973464250565],
[-0.6888407468795776, -0.8402308821678162],
[-0.7981445789337158, -1.1117373704910278],
[-2.4124443531036377, 1.3419722318649292],
[-0.6611530184745789, 0.9939885139465332],
[-0.33103418350219727, -0.16702833771705627],
[-2.4091389179229736, -2.326857566833496],
[1.6610108613967896, -2.159703254699707],
[0.014884627424180508, 0.3887578248977661],
[0.029668325558304787, 1.8786455392837524],
[1.180362582206726, 2.699317216873169],
[1.821286678314209, -0.5960053205490112],
[-0.44835323095321655, 3.327436685562134],
[-0.3714401423931122, -2.1466753482818604],
[-1.1103475093841553, -2.4536871910095215],
[-0.39110705256462097, 0.6670510172843933],
[0.474752813577652, -1.1959707736968994],
[-0.013110585510730743, -2.52519154548645],
[-2.0836575031280518, -1.703289270401001],
[-1.1077687740325928, -0.1252644956111908],
[-0.4138077199459076, 1.1837692260742188],
[-1.977599024772644, 1.688241720199585],
[-1.659559965133667, -2.1387736797332764],
[0.03242531046271324, 0.6526556015014648],
[0.9127950072288513, 0.6099498867988586],
[-0.38478314876556396, 0.433487206697464],
[0.27454206347465515, -0.27719801664352417],
[0.10388526320457458, 2.2812814712524414],
[-0.014394169673323631, -3.177137613296509],
[-1.2871228456497192, -0.8961855173110962],
[0.5720916986465454, -0.921597957611084],
[1.1159656047821045, -0.7609877586364746],
[2.4383342266082764, -2.2983546257019043],
[-0.294057160615921, -0.9770799875259399],
[-0.9342701435089111, 1.107579231262207],
[-1.549338698387146, 3.090520143508911],
[2.6076579093933105, 2.051239013671875],
[-0.9259037375450134, 1.407211184501648],
[-0.1747353971004486, 0.540488600730896],
[-0.8963701725006104, 0.8271111249923706],
[0.6480194926261902, 1.0128909349441528],
[0.980783998966217, -0.06156221032142639],
[-0.16883476078510284, 1.0601658821105957],
[0.5839992761611938, 0.004697148688137531],
[-0.34228450059890747, -1.2423977851867676],
[2.500824451446533, 0.3665279746055603],
[-0.17641609907150269, 1.3529551029205322],
[0.05378641560673714, 2.817232847213745],
[-1.2391047477722168, 2.354328155517578],
[0.630434513092041, -0.668536365032196],
[1.7576488256454468, 0.6738647818565369],
[0.4435231387615204, 0.6000469326972961],
[-0.08794835954904556, -0.11511358618736267],
[1.6540337800979614, 0.33995017409324646],
[-0.04202975332736969, -0.5375117063522339],
[-0.4247745871543884, -0.7897617220878601],
[0.06695003807544708, 1.2000739574432373],
[-3.2508881092071533, 0.28734830021858215],
[-1.613816261291504, 0.4944162368774414],
[1.3598989248275757, 0.26117825508117676],
[2.308382511138916, 1.3462618589401245],
[-1.2137469053268433, -1.9254342317581177],
[-0.4889402985572815, 1.8136259317398071],
[-0.1870335340499878, -0.3480615019798279],
[1.0766386985778809, -1.0627082586288452],
[0.4651014506816864, 2.131748914718628],
[-0.1306295394897461, -0.7811847925186157],
[0.06433182954788208, -1.5397958755493164],
[-0.2894323468208313, -0.5789554715156555],
[-0.6081662178039551, 0.4845278263092041],
[2.697964668273926, -0.18515698611736298],
[0.1277363896369934, -0.7221432328224182],
[0.8700758218765259, 0.35042452812194824],
[0.22088994085788727, 0.495242178440094],
[-2.5843818187713623, -0.8000828623771667],
[0.6732649803161621, -1.4362232685089111],
[-1.5286413431167603, 1.0417330265045166],
[-1.1222513914108276, -0.6269875764846802],
[-0.9752035140991211, -0.8750635385513306],
[-2.6369473934173584, 0.6918523907661438],
[0.14478731155395508, -0.041986867785453796],
[-1.5629483461380005, 1.4369450807571411],
[0.38952457904815674, -2.16428804397583],
[-0.16885095834732056, 0.7976621985435486],
[-3.12416934967041, 1.256506085395813],
[0.6843105554580688, -0.4203019142150879],
[1.9345275163650513, 1.934950351715088],
[0.012184220366179943, -2.1080918312072754],
[-0.6350273489952087, 0.7358828186988831],
[-0.837304949760437, -0.6214472651481628],
[0.08211923390626907, -0.9472538232803345],
[2.9332995414733887, -1.4956780672073364],
[1.3806978464126587, -0.2916182279586792],
[0.06773144006729126, 0.9285762310028076],
[-1.1943119764328003, 1.5963770151138306],
[1.6395620107650757, -0.32285431027412415],
[-1.390851378440857, -0.08273141086101532],
[1.816330909729004, -1.2812227010726929],
[0.7921574711799622, -2.1135804653167725],
[0.5817914605140686, 1.2644577026367188],
[1.929347038269043, -0.2386285960674286],
[0.8877345323562622, 1.190008521080017],
[1.4732073545455933, 0.8935023546218872],
[-2.8518524169921875, -1.5478795766830444],
[0.2439267635345459, 0.7576767802238464],
[0.5246709585189819, -2.606659412384033],
[1.150876760482788, 1.4073830842971802],
[-0.2643202245235443, 2.0634236335754395],
[1.555483341217041, -0.0023102816194295883],
[2.0830578804016113, -1.7225427627563477],
[-0.5424830317497253, -1.070199728012085],
[0.9168899655342102, 0.8955540060997009],
[-0.8120972514152527, 2.696739912033081],
[-0.29908373951911926, -1.5310651063919067],
[1.2320337295532227, -1.556247353553772],
[1.8612544536590576, 0.08704725652933121],
[0.22133447229862213, -1.8091708421707153],
[-0.4403655230998993, -0.38571012020111084],
[-1.88539457321167, 1.192205786705017],
[2.239687919616699, 0.004709010478109121],
[1.139495611190796, 0.45733731985092163],
[-1.507995367050171, 0.19716016948223114],
[0.46986445784568787, 1.5422041416168213],
[-1.2573751211166382, -0.35984551906585693],
[-1.7415345907211304, -0.6020717024803162],
[1.0751984119415283, 0.19006384909152985],
[2.24186635017395, -0.46343153715133667],
[0.3610347509384155, -0.07658443599939346],
[-1.3111497163772583, 0.432013601064682],
[0.6164408326148987, 0.24538464844226837],
[-1.9266542196273804, -0.3256155550479889],
[-0.5870336890220642, -0.1879584938287735],
[-1.0476511716842651, 0.3677721917629242],
[-1.229940414428711, 1.2433830499649048],
[0.18550436198711395, 0.22753673791885376],
[-0.017921989783644676, 0.12625974416732788],
[1.1659504175186157, -0.5020995736122131],
[-0.5983408093452454, -1.40438973903656],
[0.7519024014472961, -0.16282692551612854],
[0.9920787811279297, -1.344896912574768],
[-0.8103678226470947, 0.3064485788345337],
[0.6956969499588013, 1.8208192586898804],
[-2.7830491065979004, -0.2299390584230423],
[-0.34681546688079834, 2.4890666007995605],
[-1.4452646970748901, -1.2216600179672241],
[-2.1872897148132324, 0.8926076292991638],
[1.706072211265564, -2.8440372943878174],
[1.1119003295898438, -2.4923460483551025],
[-2.582794666290283, 2.0973289012908936],
[0.04987720400094986, -0.2964983284473419],
[-2.063807487487793, -0.7847916483879089],
[-0.4068813621997833, 0.9135897755622864],
[-0.9814359545707703, -0.3874954879283905],
[-1.4227229356765747, 0.7337291240692139],
[0.3065044581890106, 1.3125417232513428],
[1.2160996198654175, -1.9643305540084839],
[-1.2163853645324707, 0.14608727395534515],
[-2.3030710220336914, -0.37558120489120483],
[0.9232977628707886, 2.1843791007995605],
[-0.1989777386188507, 1.651851773262024],
[-0.714374840259552, -0.39365994930267334],
[-0.7805715799331665, -2.099881887435913],
[0.9015759229660034, -1.7053706645965576],
[0.1033422127366066, 1.5256654024124146],
[-1.8773194551467896, 2.324174165725708],
[1.9227174520492554, 2.7441604137420654],
[-0.5994020104408264, 0.23984014987945557],
[1.3496100902557373, -0.9126054644584656],
[-0.8765304088592529, -3.1877026557922363],
[-1.2040035724639893, -1.5169521570205688],
[1.4261796474456787, 2.150200128555298],
[1.463774561882019, 1.6656692028045654],
[0.20364105701446533, -0.4988172650337219],
[0.5195154547691345, -0.24067887663841248],
[-1.1116786003112793, -1.1599653959274292],
[-0.8490808606147766, -0.1681060940027237],
[0.3189965784549713, -0.9641751646995544],
[-0.5664751529693604, -0.5951744318008423],
[-1.6347930431365967, -0.9137664437294006],
[0.44048091769218445, -0.47259435057640076],
[-2.147747039794922, 0.47442489862442017],
[1.834734320640564, 1.4462147951126099],
[1.1777573823928833, 1.0659226179122925],
[-0.9568989872932434, 0.09495053440332413],
[-1.838529348373413, 0.2950586676597595],
[-0.4800611734390259, 0.014894310384988785],
[-0.5235516428947449, -1.7687653303146362],
[2.0735011100769043, -0.8825281262397766],
[2.637502431869507, 0.8455678224563599],
[2.606602907180786, -0.7848446369171143],
[-1.1886937618255615, 0.9330510497093201],
[0.38082656264305115, 0.13328030705451965],
[0.6847941875457764, 0.7384101152420044],
[1.2638574838638306, -0.007309418171644211],
[0.18292222917079926, -1.22371244430542],
[0.8143821954727173, 1.4976691007614136],
[0.6571850776672363, 0.48368802666664124],
[-0.6991601586341858, 2.150190830230713],
[0.8101756572723389, 0.10206498205661774],
[-0.08768226951360703, -1.084917664527893],
[-0.7208092212677002, 0.03657956421375275],
[0.3211449086666107, 1.803687334060669],
[-0.7835946083068848, 1.6869111061096191],
]
)
if (p, n) == (2, 64):
return torch.tensor(
[
[-2.7216711044311523, 0.14431366324424744],
[-0.766914427280426, 1.7193410396575928],
[-2.2575762271881104, 1.2476624250411987],
[1.233758807182312, -2.3560616970062256],
[0.8701965808868408, -0.2649352252483368],
[1.4506438970565796, 2.1776366233825684],
[-0.06305818259716034, 1.9049758911132812],
[2.536226511001587, 0.563927412033081],
[0.4599496126174927, -1.8745561838150024],
[-1.900517225265503, -0.30703988671302795],
[0.09386251866817474, 0.8755807280540466],
[1.946500539779663, -0.6743080615997314],
[2.1338934898376465, 1.4581491947174072],
[0.9429940581321716, -0.8038390278816223],
[2.0697755813598633, -1.614896535873413],
[0.772676408290863, 0.22017823159694672],
[1.0689979791641235, -1.525044322013855],
[0.6813604831695557, 1.1345642805099487],
[0.4706456661224365, 2.606626272201538],
[-1.294018030166626, -0.4372096061706543],
[-0.09134224057197571, 0.4610418677330017],
[-0.7907772064208984, -0.48412787914276123],
[0.060459110885858536, -0.9172890186309814],
[-0.5855047702789307, 2.56172513961792],
[0.11484206467866898, -2.659848213195801],
[-1.5893300771713257, 2.188580274581909],
[1.6750942468643188, 0.7089915871620178],
[-0.445697546005249, 0.7452405095100403],
[-1.8539940118789673, -1.8377939462661743],
[-1.5791912078857422, -1.017285943031311],
[-1.030419945716858, -1.5746369361877441],
[-1.9511750936508179, 0.43696075677871704],
[-0.3446580767631531, -1.8953213691711426],
[-1.4219647645950317, 0.7676230669021606],
[-0.9191089272499084, 0.5021472573280334],
[0.20464491844177246, 1.3684605360031128],
[0.5402919054031372, 0.6699410676956177],
[1.8903915882110596, 0.03638288006186485],
[0.4723062515258789, -0.6216739416122437],
[-0.41345009207725525, -0.22752176225185394],
[2.7119064331054688, -0.5111885070800781],
[1.065286636352539, 0.6950305700302124],
[0.40629103779792786, -0.14339995384216309],
[1.2815024852752686, 0.17108257114887238],
[0.01785222627222538, -0.43778058886528015],
[0.054590027779340744, -1.4225547313690186],
[0.3076786696910858, 0.30697619915008545],
[-0.9498570561408997, -0.9576997756958008],
[-2.4640724658966064, -0.9660449028015137],
[1.3714425563812256, -0.39760473370552063],
[-0.4857747256755829, 0.2386789172887802],
[1.2797833681106567, 1.3097363710403442],
[0.5508887767791748, -1.1777795553207397],
[-1.384316325187683, 0.1465839296579361],
[-0.46556955575942993, -1.2442727088928223],
[-0.3915477693080902, -0.7319604158401489],
[-1.4005504846572876, 1.3890998363494873],
[-0.8647305965423584, 1.0617644786834717],
[-0.8901953101158142, -0.01650036871433258],
[-0.9893633723258972, -2.4662880897521973],
[1.445534110069275, -1.049334168434143],
[-0.041650623083114624, 0.012734669260680676],
[-0.3302375078201294, 1.26217782497406],
[0.6934980154037476, 1.7714335918426514],
]
)
elif (p, n) == (2, 16):
return torch.tensor(
[
[-0.8996632695198059, -1.6360418796539307],
[-0.961183488368988, 1.5999565124511719],
[-1.882026195526123, 0.678778350353241],
[0.36300793290138245, -1.9667866230010986],
[-0.6814072728157043, -0.576818585395813],
[0.7270012497901917, 0.6186859607696533],
[0.3359416127204895, 1.8371193408966064],
[1.859930396080017, 0.036668598651885986],
[0.17208248376846313, -0.9401724338531494],
[-1.7599700689315796, -0.6244229674339294],
[-0.8993809223175049, 0.32267823815345764],
[0.839488685131073, -0.3017036020755768],
[1.5314953327178955, 1.2942044734954834],
[-0.0011779458727687597, 0.00022069070837460458],
[1.4274526834487915, -1.207889199256897],
[-0.16123905777931213, 0.8787511587142944],
]
)
elif (p, n) == (1, 16):
return torch.tensor(
[
[-2.7325894832611084],
[-2.069017171859741],
[-1.6180464029312134],
[-1.2562311887741089],
[-0.9423404335975647],
[-0.6567591428756714],
[-0.38804829120635986],
[-0.12839503586292267],
[0.12839503586292267],
[0.38804829120635986],
[0.6567591428756714],
[0.9423404335975647],
[1.2562311887741089],
[1.6180464029312134],
[2.069017171859741],
[2.7325894832611084],
]
)
elif (p, n) == (1, 8):
return torch.tensor(
[
[-2.1519455909729004],
[-1.3439092636108398],
[-0.7560052871704102],
[-0.2450941801071167],
[0.2450941801071167],
[0.7560052871704102],
[1.3439092636108398],
[2.1519455909729004],
]
)
elif (p, n) == (1, 4):
return torch.tensor([[-1.5104175806045532], [-0.4527800381183624], [0.4527800381183624], [1.5104175806045532]])
else:
raise NotImplementedError(f"Unsupported p={p}, n={n}")
def quantize_with_higgs(weight, bits: int = 4, p: int = 2, group_size: int = 256, hadamard_size: int = 1024):
assert len(weight.shape) == 2, "Only 2D weights are supported for now"
grid = get_higgs_grid(p, 2 ** (p * bits)).to(weight.device)
grid_norm_2 = torch.linalg.norm(grid, axis=-1) ** 2
device = weight.device
dtype = weight.dtype
weight = weight.to(copy=True, dtype=torch.float32)
# Pad to Hadamard transform size
weight = pad_to_block(weight, [1], hadamard_size)
# Scale and Hadamard transform
mult = weight.shape[1] // hadamard_size
weight = weight.reshape(-1, mult, hadamard_size)
scales = torch.linalg.norm(weight, axis=-1)
weight = hadamard_transform(weight, 1) / scales[:, :, None]
# Pad to edenn_d and project
weight = pad_to_block(weight, [2], p).reshape(weight.shape[0], mult, -1, p)
# Quantize
codes = torch.empty(weight.shape[:-1], device=device, dtype=torch.uint8)
for i in range(0, weight.shape[0], 16):
codes[i : i + 16] = torch.argmax(2 * weight[i : i + 16] @ grid.T - grid_norm_2, dim=-1).to(torch.uint8)
del weight
codes = codes.reshape(codes.shape[0], -1)
scales = scales / sqrt(hadamard_size)
weight, scales, tables, tables2, tune_metadata = prepare_data_transposed(
codes,
torch.repeat_interleave(scales.to(dtype), hadamard_size // group_size, dim=1),
grid.to(dtype),
num_bits=bits,
group_size=group_size,
vector_size=p,
dtype=dtype,
device=device,
check_correctness=False,
)
return {
"weight": weight,
"scales": scales,
"tables": tables,
"tables2": tables2.view(dtype=torch.float16),
"tune_metadata": tune_metadata,
}
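# Illustrative usage sketch (not part of the original API). It assumes `flute` and
# `fast_hadamard_transform` are installed and a CUDA device is available; the weight shape is arbitrary.
def _example_quantize_with_higgs():
    weight = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")
    quantized = quantize_with_higgs(weight, bits=4, p=2, group_size=256, hadamard_size=1024)
    # the returned dict holds everything a HiggsLinear needs: packed codes, per-group scales,
    # the lookup tables and the FLUTE tuning metadata
    return quantized["weight"], quantized["scales"], quantized["tables"], quantized["tables2"], quantized["tune_metadata"]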
class HiggsLinear(torch.nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
num_bits: int,
bias=True,
dtype: torch.dtype | None = None,
device: torch.device | None = None,
group_size: int = 256,
hadamard_size: int = 1024,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.num_bits = num_bits
self.group_size = group_size
self.hadamard_size = hadamard_size
assert in_features % group_size == 0
assert num_bits in [2, 3, 4]
self.weight = nn.Parameter(
torch.empty((out_features * num_bits // 16, in_features), dtype=torch.int16, device=device),
requires_grad=False,
)
self.scales = nn.Parameter(
torch.empty((out_features, in_features // group_size), dtype=dtype, device=device), requires_grad=False
)
self.tables = nn.Parameter(torch.empty((2**num_bits,), dtype=dtype, device=device), requires_grad=False)
self.tables2 = nn.Parameter(
torch.empty((2**num_bits, 2**num_bits, 2), dtype=dtype, device=device), requires_grad=False
)
if bias:
self.bias = nn.Parameter(torch.empty(out_features, device=device, dtype=dtype), requires_grad=False)
else:
self.register_parameter("bias", None)
self.workspace = None # must be set externally to be reused among layers
self.tune_metadata: TuneMetaData = None # must be set externally because architecture dependent
def forward(self, x):
x = pad_to_block(x, [-1], self.hadamard_size)
if self.workspace is None:
raise Exception("Workspace must be set before calling forward")
return qgemm_v2(
x,
self.weight,
self.scales,
self.tables,
self.tables2.view(dtype=torch.float32),
self.workspace,
self.tune_metadata,
hadamard_size=self.hadamard_size,
)
def replace_with_higgs_linear(model, modules_to_not_convert: list[str] | None = None, quantization_config=None):
"""
Public method that replaces the Linear layers of the given model with HIGGS quantized layers.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be
converted.
quantization_config (`HiggsConfig`):
The quantization config object that contains the quantization parameters.
"""
has_been_replaced = False
# we need this to correctly materialize the weights during quantization
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
with torch.device("meta"):
if isinstance(module, nn.Linear):
new_module = HiggsLinear(
module.in_features,
module.out_features,
bias=module.bias is not None,
num_bits=quantization_config.bits,
hadamard_size=quantization_config.hadamard_size,
group_size=quantization_config.group_size,
)
new_module.source_cls = type(module)
new_module.requires_grad_(False)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using eetq but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
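# Illustrative usage sketch (not part of the original API); `quantization_config` is assumed to be a
# HIGGS quantization config exposing `bits`, `group_size` and `hadamard_size`, and the excluded module
# list is an arbitrary choice.
def _example_replace_with_higgs_linear(model, quantization_config):
    return replace_with_higgs_linear(
        model, modules_to_not_convert=["lm_head"], quantization_config=quantization_config
    )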
def dequantize_higgs(model, current_key_name=None):
"""
Dequantizes the HiggsLinear layers in the given model by replacing them with standard torch.nn.Linear layers.
Args:
model (torch.nn.Module): The model containing HiggsLinear layers to be dequantized.
current_key_name (list, optional): A list to keep track of the current module names during recursion. Defaults to None.
Returns:
torch.nn.Module: The model with HiggsLinear layers replaced by torch.nn.Linear layers.
"""
with torch.no_grad():
for name, module in model.named_children():
if current_key_name is None:
current_key_name = []
current_key_name.append(name)
if isinstance(module, HiggsLinear):
in_features = module.in_features
out_features = module.out_features
model._modules[name] = torch.nn.Linear(
in_features,
out_features,
bias=module.bias is not None,
device=module.scales.device,
dtype=module.scales.dtype,
)
model._modules[name].weight.data = module(
torch.eye(in_features, device=module.scales.device, dtype=module.scales.dtype)
).T.contiguous()
if len(list(module.children())) > 0:
_ = dequantize_higgs(
module,
current_key_name=current_key_name,
)
# Remove the last key for recursion
current_key_name.pop(-1)
return model
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/npu_flash_attention.py | src/transformers/integrations/npu_flash_attention.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import torch
from ..utils.import_utils import is_torch_npu_available
if is_torch_npu_available():
from torch_npu import npu_fusion_attention
# FlashAttention2 is supported on Ascend NPU with down-right aligned causal mask by default.
# Set environment variable `NPU_FA2_SPARSE_MODE` to 2 when using top-left aligned causal mask.
TOP_LEFT_ALIGNED_CAUSAL_MASK_MODE = 2
DOWN_RIGHT_ALIGNED_CAUSAL_MASK_MODE = 3
SPARSE_MODE = int(os.getenv("NPU_FA2_SPARSE_MODE", default=DOWN_RIGHT_ALIGNED_CAUSAL_MASK_MODE))
if SPARSE_MODE not in [TOP_LEFT_ALIGNED_CAUSAL_MASK_MODE, DOWN_RIGHT_ALIGNED_CAUSAL_MASK_MODE]:
raise ValueError(
"Environment variable `NPU_FA2_SPARSE_MODE` can only be set as 2 (top-left aligned causal mask) "
"or 3 (down-right aligned causal mask)."
)
ATTN_MASK_NPU_CACHE = {}
def get_attn_mask_npu(device):
"""Get or create attention mask for the specified device."""
if device not in ATTN_MASK_NPU_CACHE:
ATTN_MASK_NPU_CACHE[device] = torch.triu(torch.ones([2048, 2048], device=device), diagonal=1).bool()
return ATTN_MASK_NPU_CACHE[device]
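# Small sketch (not part of the original API): the cached mask is an upper-triangular boolean matrix
# (True strictly above the diagonal), shared across calls for the same device.
def _example_get_attn_mask_npu():
    mask = get_attn_mask_npu("cpu")
    assert mask.shape == (2048, 2048) and mask.dtype == torch.bool
    assert mask is get_attn_mask_npu("cpu")  # the second call hits the cache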
def is_npu_fa2_top_left_aligned_causal_mask():
return SPARSE_MODE == TOP_LEFT_ALIGNED_CAUSAL_MASK_MODE if is_torch_npu_available() else False
def npu_flash_attn_func(
q,
k,
v,
dropout_p=0.0,
softmax_scale=None,
causal=False,
**kwargs,
):
keep_prob = 1.0 - dropout_p
if softmax_scale is None:
softmax_scale = 1.0 / math.sqrt(q.shape[-1])
if not causal:
head_num = q.shape[2]
output = npu_fusion_attention(q, k, v, head_num, "BSND", keep_prob=keep_prob, scale=softmax_scale)[0]
else:
attn_mask_npu = get_attn_mask_npu(q.device)
head_num = q.shape[2]
output = npu_fusion_attention(
q,
k,
v,
head_num,
"BSND",
keep_prob=keep_prob,
scale=softmax_scale,
atten_mask=attn_mask_npu,
sparse_mode=SPARSE_MODE,
)[0]
return output
def npu_flash_attn_varlen_func(
q,
k,
v,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q=None, # defined for aligning params order with corresponding function in `flash-attn`
max_seqlen_k=None, # defined for aligning params order with corresponding function in `flash-attn`
dropout_p=0.0,
softmax_scale=None,
causal=False,
**kwargs,
):
keep_prob = 1.0 - dropout_p
if softmax_scale is None:
softmax_scale = 1.0 / math.sqrt(q.shape[-1])
if not causal:
head_num = q.shape[1]
output = npu_fusion_attention(
q,
k,
v,
head_num,
pse=None,
atten_mask=None,
scale=softmax_scale,
keep_prob=keep_prob,
input_layout="TND",
actual_seq_qlen=tuple(cu_seqlens_q[1:].cpu().numpy().tolist()),
actual_seq_kvlen=tuple(cu_seqlens_k[1:].cpu().numpy().tolist()),
)[0]
else:
attn_mask_npu = get_attn_mask_npu(q.device)
head_num = q.shape[1]
output = npu_fusion_attention(
q,
k,
v,
head_num,
pse=None,
padding_mask=None,
atten_mask=attn_mask_npu,
scale=softmax_scale,
keep_prob=keep_prob,
input_layout="TND",
actual_seq_qlen=tuple(cu_seqlens_q[1:].cpu().numpy().tolist()),
actual_seq_kvlen=tuple(cu_seqlens_k[1:].cpu().numpy().tolist()),
sparse_mode=SPARSE_MODE,
)[0]
return output
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/tiktoken.py | src/transformers/integrations/tiktoken.py | from pathlib import Path
from typing import Any
from transformers.convert_slow_tokenizer import TikTokenConverter
from transformers.tokenization_utils_tokenizers import TIKTOKEN_VOCAB_FILE, TOKENIZER_FILE
def convert_tiktoken_to_fast(encoding: Any, output_dir: str):
"""
Converts a given `tiktoken` encoding to `PreTrainedTokenizerFast` and saves the configuration of the converted tokenizer on disk.
Args:
encoding (`str` or `tiktoken.Encoding`):
Tokenizer from `tiktoken` library. If `encoding` is `str`, the tokenizer will be loaded with
`tiktoken.get_encoding(encoding)`.
output_dir (`str`):
Save path for converted tokenizer configuration file.
"""
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True)
save_file = output_dir / "tiktoken" / TIKTOKEN_VOCAB_FILE
tokenizer_file = output_dir / TOKENIZER_FILE
# Create parent directory for save_file
save_file.parent.mkdir(parents=True, exist_ok=True)
save_file_absolute = str(save_file.absolute())
output_file_absolute = str(tokenizer_file.absolute())
try:
from tiktoken import get_encoding
from tiktoken.load import dump_tiktoken_bpe
if isinstance(encoding, str):
encoding = get_encoding(encoding)
dump_tiktoken_bpe(encoding._mergeable_ranks, save_file_absolute)
except ImportError as e:
error_msg = str(e)
if "blobfile" in error_msg.lower():
raise ValueError(
"`blobfile` is required to save a `tiktoken` file. Install it with `pip install blobfile`."
) from e
raise ValueError(
"`tiktoken` is required to save a `tiktoken` file. Install it with `pip install tiktoken`."
) from e
tokenizer = TikTokenConverter(
vocab_file=save_file_absolute, pattern=encoding._pat_str, extra_special_tokens=encoding._special_tokens
).converted()
tokenizer.save(output_file_absolute)
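# Illustrative usage sketch (not part of the original file), assuming `tiktoken` and `blobfile` are
# installed; the encoding name and output directory are arbitrary choices.
def _example_convert_tiktoken_to_fast():
    convert_tiktoken_to_fast("gpt2", "converted-tokenizer")
    # the output directory now contains a tokenizer file loadable with PreTrainedTokenizerFast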
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/flash_paged.py | src/transformers/integrations/flash_paged.py | import torch
from ..generation.continuous_batching import PagedAttentionCache
from ..modeling_flash_attention_utils import lazy_import_paged_flash_attention
def paged_attention_forward(
module: torch.nn.Module,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
attention_mask: torch.Tensor | None = None,
cache: PagedAttentionCache = None,
cu_seq_lens_q=None,
cu_seq_lens_k=None,
max_seqlen_q=None,
max_seqlen_k=None,
**kwargs,
) -> torch.Tensor:
r"""Perform the forward pass of attention with paged key-value cache.
This function handles the cache updates and performs the attention computation
using the flash_attn_varlen_func for efficient processing.
Args:
q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
k: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch, but if there is a block table it can be the full k
v: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch, but if there is a block table it can be the full v
cu_seq_lens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into q.
cu_seq_lens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into kv.
max_seqlen_q: int. Maximum query sequence length in the batch.
max_seqlen_k: int. Maximum key sequence length in the batch.
The options below describe the underlying `flash_attn_varlen_func` kernel and are not part of this wrapper's signature:
    dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
Default to 1 / sqrt(headdim).
causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
window_size: (left, right). If not (-1, -1), implements sliding window local attention.
softcap: float. Anything > 0 activates softcapping attention.
"""
flash_attn_varlen_func = lazy_import_paged_flash_attention(module.config._attn_implementation)
sliding_window = (-1, -1) if not getattr(module, "sliding_window", False) else (module.sliding_window - 1, 0)
layer_type = "full_attention" if sliding_window == (-1, -1) else "sliding_attention"
# .update changes the shape of k and v from [1, num_kv_heads, seqlen_kv, head_dim] to [-1, num_kv_heads, head_dim]
if cache is not None:
k, v = cache.update(
key_states=k,
value_states=v,
layer_idx=module.layer_idx,
read_index=kwargs["read_index"],
write_index=kwargs["write_index"],
)
# Retrieve the cumulative sequence lengths for the current layer
if isinstance(cu_seq_lens_k, dict):
cu_seq_lens_k = cu_seq_lens_k[layer_type]
max_seqlen_k = max_seqlen_k[layer_type]
custom_kwargs = {"s_aux": kwargs.get("s_aux")} if "s_aux" in kwargs else {}
attn_output = flash_attn_varlen_func(
q.transpose(1, 2).squeeze(0).contiguous(),
k.contiguous(),
v.contiguous(),
cu_seq_lens_q.to(torch.int32),
cu_seq_lens_k.to(torch.int32).clone(),
max_seqlen_q,
max_seqlen_k,
softmax_scale=module.scaling,
causal=True, # kind of a must, it automatically aligns the mask for q < k
window_size=sliding_window, # -1 means infinite context window
**custom_kwargs,
)
if isinstance(attn_output, tuple):
attn_output = attn_output[0]
return attn_output, None
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/tensor_parallel.py | src/transformers/integrations/tensor_parallel.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
import operator
import os
import re
from functools import partial, reduce
from ..distributed import DistributedConfig
from ..utils import is_torch_greater_or_equal, logging
from ..utils.generic import GeneralInterface
from ..utils.import_utils import is_torch_available
if is_torch_available():
import torch
import torch.distributed as dist
from torch import nn
# Cache this result as it's a C FFI call which can be pretty time-consuming
_torch_distributed_available = torch.distributed.is_available()
if is_torch_greater_or_equal("2.5") and _torch_distributed_available:
from torch.distributed.tensor import DTensor, Placement, Replicate, Shard
logger = logging.get_logger(__name__)
def initialize_tensor_parallelism(
tp_plan: str | dict[str, str] | None, tp_size: int | None = None, device_mesh=None, device_map=None
):
r"""
Sets up the device mesh and initializes the backend for tensor parallelism.
This function is called when the model is loaded and the TP plan is set to 'auto'.
"""
if tp_size is not None and tp_plan is None:
raise ValueError("tp_plan has to be set when tp_size is passed.")
if tp_plan is not None and device_map is not None:
raise ValueError("`tp_plan` and `device_map` are mutually exclusive. Choose either one for parallelization.")
if device_mesh is None:
if not is_torch_greater_or_equal("2.5"):
raise OSError("Tensor parallel is only supported for `torch>=2.5`.")
# Detect the accelerator on the machine. If no accelerator is available, it returns CPU.
device_type = torch._C._get_accelerator().type
if device_type == "mps":
device_type = "cpu" # fallback
current_device = getattr(torch, device_type)
if not torch.distributed.is_initialized():
try:
rank = int(os.environ["RANK"])
local_rank = int(os.environ["LOCAL_RANK"])
world_size = int(os.environ["WORLD_SIZE"])
backend_map = {"cuda": "nccl", "cpu": "gloo", "xpu": "xccl", "hpu": "hccl"}
backend = backend_map.get(device_type)
if device_type == "cpu" and int(os.environ.get("CCL_WORKER_COUNT", "0")):
backend = "ccl"
if device_type == "xpu" and not is_torch_greater_or_equal("2.8", accept_dev=True):
backend = "ccl"
torch.distributed.init_process_group(backend=backend, rank=rank, world_size=world_size)
current_device = getattr(torch, device_type)
if device_type != "cpu":
current_device.set_device(local_rank)
except Exception as e:
raise OSError(
"We tried to initialize torch.distributed for you, but it failed. Make "
"sure you init torch distributed in your script to use `tp_plan`."
) from e
if device_type != "cpu":
current_device.set_device(int(os.environ["LOCAL_RANK"]))
index = current_device.current_device()
tp_device = torch.device(device_type, index)
device_map = tp_device
# Silence output for non-primary ranks
if index > 0:
import sys
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
else:
tp_device = torch.device(device_type)
device_map = device_type or {}
tp_size = tp_size if tp_size is not None else torch.distributed.get_world_size()
device_mesh = torch.distributed.init_device_mesh(tp_device.type, (tp_size,))
else:
if device_mesh.ndim > 1:
if "tp" not in device_mesh.mesh_dim_names:
raise ValueError(
"When using `tp_plan` and n-d `device_mesh`, it must contain a 'tp' dimension. "
"Please provide a valid `device_mesh`."
)
device_mesh = device_mesh["tp"]
tp_size = device_mesh.size()
device_map = torch.device(f"{device_mesh.device_type}:{int(os.environ['LOCAL_RANK'])}")
return device_map, device_mesh, tp_size
def _blocks_to_block_sizes(total_size: int, blocks: int | list[int]) -> list[int]:
"""
Convert block count or proportions to block sizes.
This function accepts
- The number of blocks (int), in which case the block size is
total_size//blocks; or
- A list of block sizes (list[int]).
In the second case, if sum(blocks) < total_size, the ratios between
the block sizes will be preserved. For instance, if blocks is
[2, 1, 1] and total_size is 1024, the returned block sizes are
[512, 256, 256].
"""
if isinstance(blocks, list):
total_blocks = sum(blocks)
assert total_size % total_blocks == 0, f"Cannot split {total_size} in proportional blocks: {blocks}"
part_size = total_size // total_blocks
return [part_size * block for block in blocks]
else:
assert total_size % blocks == 0, f"Prepacked is not divisible by {blocks}"
single_size = total_size // blocks
return [single_size] * blocks
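# Small sketch (not part of the original API) showing both accepted forms of `blocks`, mirroring the
# example in the docstring above.
def _example_blocks_to_block_sizes():
    assert _blocks_to_block_sizes(total_size=1024, blocks=4) == [256, 256, 256, 256]
    assert _blocks_to_block_sizes(total_size=1024, blocks=[2, 1, 1]) == [512, 256, 256]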
def replace_layer_number_by_wildcard(name: str) -> str:
"""
Replace the numbers in the `name` by wildcards, only if they are in-between dots (`.`) or if they are between
a dot (`.`) and the end of the string.
This matches how modules are named/numbered when using a nn.ModuleList or nn.Sequential, but will NOT match
numbers in a parameter name itself, e.g. if the param is named `"w1"` or `"w2"`.
"""
return re.sub(r"\.\d+(\.|$)", lambda m: ".*" + m.group(1), name)
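# Small sketch (not part of the original API) of the substitution described above: layer indices
# between dots are wildcarded, while digits inside a parameter name (e.g. "w1") are preserved.
def _example_replace_layer_number_by_wildcard():
    assert replace_layer_number_by_wildcard("model.layers.12.self_attn.q_proj.weight") == "model.layers.*.self_attn.q_proj.weight"
    assert replace_layer_number_by_wildcard("model.layers.0.mlp.w1") == "model.layers.*.mlp.w1"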
def _get_parameter_tp_plan(parameter_name: str, tp_plan: dict[str, str], is_weight=True) -> str | None:
"""
Get the TP style for a parameter from the TP plan.
The TP plan is a dictionary that maps parameter names to TP styles.
The parameter name can be a generic name with wildcards (e.g. "*.weight") or a specific name (e.g. "layer_1.weight").
The `is_weight` flag is important because for weights we want to resolve `.weight` and `.bias` through their parent
module name seamlessly, but we do not want that parent-module fallback for the `post_init` calls.
"""
generic_param_name = replace_layer_number_by_wildcard(parameter_name)
if generic_param_name in tp_plan:
return tp_plan[generic_param_name]
elif is_weight and "." in generic_param_name and (module_name := generic_param_name.rsplit(".", 1)[0]) in tp_plan:
return tp_plan[module_name]
return None
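# Small sketch (not part of the original API): a hypothetical tp_plan and the lookups it resolves.
def _example_get_parameter_tp_plan():
    tp_plan = {"model.layers.*.self_attn.q_proj": "colwise", "model.layers.*.mlp.down_proj": "rowwise"}
    # a weight is matched through its parent module name once the layer index has been wildcarded
    assert _get_parameter_tp_plan("model.layers.3.self_attn.q_proj.weight", tp_plan) == "colwise"
    # parameters without a matching entry get no TP style
    assert _get_parameter_tp_plan("model.embed_tokens.weight", tp_plan) is None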
if is_torch_available():
str_to_dtype = {
"BOOL": torch.bool,
"U8": torch.uint8,
"I8": torch.int8,
"I16": torch.int16,
"F16": torch.float16,
"BF16": torch.bfloat16,
"I32": torch.int32,
"F32": torch.float32,
"F64": torch.float64,
"I64": torch.int64,
"F8_E4M3": torch.float8_e4m3fn,
}
def get_packed_weights(param, empty_param, device_mesh, rank, dim):
"""
When weights are packed (gate_up_proj), we need to make sure each shard gets its correct share.
So if you have: gate_proj ( 16, 5120, 8190)
and up_proj ( 16, 5120, 8190)
packed as gate_up_proj ( 16, 5120, 2 * 8190)
And you shard along the last dimension, you need to interleave the gate and up values:
Now, if we shard along the last dimension across TP_size (Tensor Parallelism size), we must interleave the values from gate and up projections correctly.
Let's take TP_size = 4 for an example:
Packed tensor `gate_up_proj`
---------------------------------------------------------------
[ G0 G1 G2 G3 | G4 G5 G6 G7 | ... | U0 U1 U2 U3 | U4 U5 U6 U7 | ... ]
↑─────────────↑ ↑─────────────↑ ↑─────────────↑ ↑─────────────↑
Gate Slice 0 Gate Slice 1 Up Slice 0 Up Slice 1
Explanation:
- The first half of the tensor (left of the center) holds the gate_proj values.
- The second half (right of the center) holds the up_proj values.
- For TP=4, we divide each half into 4 slices. In this example, we show two slices for brevity.
- Each shard receives one slice from the gate part and the corresponding slice from the up part.
For instance:
• Shard 0 gets: [ Gate Slice 0, Up Slice 0 ] = [ G0, G1, G2, G3, U0, U1, U2, U3 ]
• Shard 1 gets: [ Gate Slice 1, Up Slice 1 ] = [ G4, G5, G6, G7, U4, U5, U6, U7 ]
• … and so on.
This ensures that each shard receives an equal portion of both gate and up projections, maintaining consistency across tensor parallelism.
"""
slice_ = param
total_size = empty_param.shape[dim]
world_size = device_mesh.size()
block_sizes = _blocks_to_block_sizes(total_size=total_size, blocks=2)
tensors_slices = []
block_offset = 0
for block_size in block_sizes:
shard_block_size = block_size // world_size
start = rank * shard_block_size
stop = (rank + 1) * shard_block_size
tensors_slices += range(block_offset + start, block_offset + stop)
block_offset += block_size
slice_dtype = slice_.get_dtype()
# Handle F8_E4M3 dtype by converting to float16 before slicing
# Without upcasting, the slicing causes : RuntimeError: "index_cpu" not implemented for 'Float8_e4m3fn'
casted = False
if slice_dtype == "F8_E4M3" or slice_dtype == "F8_E5M2":
slice_ = slice_[...].to(torch.float16)
casted = True
if dim == 0:
tensor = slice_[tensors_slices, ...]
elif dim == 1 or dim == -2:
tensor = slice_[:, tensors_slices, ...]
elif dim == 2 or dim == -1:
tensor = slice_[..., tensors_slices]
else:
raise ValueError(f"Unsupported dim {dim}, only dim 0, 1 or 2 are supported")
if casted:
return tensor
else:
return tensor.to(str_to_dtype[slice_dtype])
def repack_weights(
packed_parameter: torch.Tensor,
sharded_dim: int, # The dimension index in the global tensor that was sharded
world_size: int,
num_blocks: int = 2,
) -> torch.Tensor:
"""
Reorders a tensor that was reconstructed from sharded packed weights into its canonical packed format.
For example, if a weight was packed (e.g., gate_proj and up_proj) and then sharded,
DTensor.full_tensor() might produce an interleaved layout like [G0, U0, G1, U1, ...]
along the sharded dimension. This function reorders it to [G0, G1, ..., U0, U1, ...].
This is an inverse operation to get_packed_weights.
Args:
packed_parameter: The tensor reconstructed from DTensor (e.g., via .full_tensor().contiguous()).
sharded_dim: The dimension index in `packed_parameter` that was originally sharded.
world_size: The tensor parallel world size.
num_blocks: The number of projections that were packed together (e.g., 2 for gate_up_proj).
Returns:
The reordered tensor in canonical packed format.
"""
if num_blocks != 2:
raise ValueError(
"Num blocks different from 2 is not supported yet. This is most likely a bug in your implementation as we only pack gate and up projections together."
)
actual_sharded_dim = sharded_dim if sharded_dim >= 0 else sharded_dim + packed_parameter.ndim
total_size_on_sharded_dim = packed_parameter.shape[actual_sharded_dim]
original_block_size_on_dim = total_size_on_sharded_dim // num_blocks
shard_chunk_size = original_block_size_on_dim // world_size
prefix_shape = packed_parameter.shape[:actual_sharded_dim]
suffix_shape = packed_parameter.shape[actual_sharded_dim + 1 :]
tensor_view = packed_parameter.view(
*prefix_shape,
world_size,
num_blocks,
shard_chunk_size,
*suffix_shape,
)
# Permute to bring num_blocks first, then world_size, then shard_chunk_size.
# This groups all chunks of G together, then all chunks of U together.
# Target order of these middle dimensions: (num_blocks, world_size, shard_chunk_size)
# Current order of view's middle dimensions: (world_size, num_blocks, shard_chunk_size)
# Absolute indices of the dimensions to be permuted (world_size, num_blocks)
axis_ws_abs = len(prefix_shape)
axis_npp_abs = len(prefix_shape) + 1
permute_order = list(range(tensor_view.ndim))
permute_order[axis_ws_abs], permute_order[axis_npp_abs] = permute_order[axis_npp_abs], permute_order[axis_ws_abs]
tensor_permuted = tensor_view.permute(*permute_order)
# Reshape back to the original tensor's ndim, with the sharded dimension now correctly ordered as [G_all, U_all].
# The final shape is the same as `packed_parameter`.
final_ordered_tensor = tensor_permuted.reshape_as(packed_parameter)
return final_ordered_tensor
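# Illustrative sketch (not part of the original API): gathering column-sharded gate_up_proj shards with
# world_size=2 yields the interleaved layout [G_half0, U_half0, G_half1, U_half1]; `repack_weights`
# restores the canonical [G, U] order. All shapes and values below are arbitrary assumptions.
def _example_repack_weights():
    gate = torch.arange(0, 8).reshape(8, 1).float()
    up = torch.arange(100, 108).reshape(8, 1).float()
    interleaved = torch.cat([gate[:4], up[:4], gate[4:], up[4:]], dim=0)
    repacked = repack_weights(interleaved, sharded_dim=0, world_size=2, num_blocks=2)
    assert torch.equal(repacked, torch.cat([gate, up], dim=0))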
def get_tensor_shard(param, empty_param, device_mesh, rank, dim, tensor_idx: int | None = None):
"""
Generalized tensor sharding across a multi-dimensional device mesh.
Extract only the fraction of the parameter owned by the given `rank` when the parameter would have gone sharding at provided `dim`.
Extraction follows the pytorch `Shard` placement so that sharding and materializing back to full tensor follows `Shard` semantics.
`Shard` follows torch.chunk-style sharding of the tensor. We demonstrate below how sharding happens, including edge cases
such as some ranks receiving an empty tensor as their shard. The implementation below is robust to all of these cases.
Case (1)
empty_param (16, 5120, 8190)
dim 0
device_mesh.size() 4
rank 0 gets (4, 5120, 8190) (0 ... 4, 5120, 8190)
rank 1 gets (4, 5120, 8190) (4 ... 8, 5120, 8190)
rank 2 gets (4, 5120, 8190) (8 ... 12, 5120, 8190)
rank 3 gets (4, 5120, 8190) (12 ... 16, 5120, 8190)
Case (2)
empty_param (16, 5120, 8190)
dim 0
device_mesh.size() 14
rank 0 gets (2, 5120, 8190) (0 ... 2, 5120, 8190)
rank 1 gets (2, 5120, 8190) (2 ... 4, 5120, 8190)
rank 2 gets (2, 5120, 8190) (4 ... 6, 5120, 8190)
rank 3 gets (2, 5120, 8190) (6 ... 8, 5120, 8190)
rank 4 gets (2, 5120, 8190) (8 ... 10, 5120, 8190)
rank 5 gets (2, 5120, 8190) (10 ... 12, 5120, 8190)
rank 6 gets (2, 5120, 8190) (12 ... 14, 5120, 8190)
rank 7 gets (2, 5120, 8190) (14 ... 16, 5120, 8190)
rank 8 gets (0, 5120, 8190)
rank 9 gets (0, 5120, 8190)
rank 10 gets (0, 5120, 8190)
rank 11 gets (0, 5120, 8190)
rank 12 gets (0, 5120, 8190)
rank 13 gets (0, 5120, 8190)
Case (3)
empty_param (16, 5120, 8190)
dim 0
device_mesh.size() 3
rank 0 gets (6, 5120, 8190) (0 ... 6, 5120, 8190)
rank 1 gets (6, 5120, 8190) (6 ... 12, 5120, 8190)
rank 2 gets (4, 5120, 8190) (12 ... 16, 5120, 8190)
In case (2), empty shards are returned with appropriate dimension to allow for operations to work smoothly.
Args:
param (torch.Tensor): The tensor to shard.
empty_param (torch.Tensor): A tensor used for shape reference.
device_mesh (`DeviceMesh`): The device mesh, of shape [d_0, ..., d_n].
rank (int): Global rank of the current process/device.
dim (int): Dimension along which to shard the tensor.
tensor_idx (`int`, *optional*): Index of the 2D slice within a stacked 3D parameter; used to decide which rank materializes the whole slice.
"""
param_dim = empty_param.ndim
# Flatten the mesh to get the total number of devices
mesh_shape = device_mesh.shape
world_size = reduce(operator.mul, mesh_shape)
if dim < 0:
dim = param_dim + dim
if empty_param.dim() == 3 and dim == 1 and len(param.get_shape()) == 2:
dim = 0
elif empty_param.dim() == 3 and dim == 2 and len(param.get_shape()) == 2:
dim = 0
shard_size = math.ceil(empty_param.size(dim) / world_size)
start = rank * shard_size
end = min(start + shard_size, empty_param.size(dim))
if dim >= param_dim:
raise ValueError(f"dim {dim} is out of bounds for tensor of dimension {param_dim}")
if rank >= world_size:
raise ValueError(f"Rank {rank} is out of bounds for mesh size {world_size}")
# Here we have the full tensor, not one part of it.
# In that case, we just assume that the weight was properly saved. Since we apply TP, a colwise layer
# should not use this path: the layer should be packed_colwise to signal that it needs to read from a
# packed tensor (which also takes care of the module-list handling).
# Here we take care of potential chunking / layer split / layer chunking.
# The only "hard" case is when we collect q, k, v and merge them into qkv; even then we still shard
# along dim=0, so nothing changes.
# The only special case is if the empty param has 3 dims and the shard dim is 0 -> we place the whole
# tensor on a given device (selected via the input tensor_idx).
dimensions = param.get_shape()
if empty_param.dim() == 3 and dim == 0 and len(param.get_shape()) == 2:
# Special case: we don't "shard", we just send this entire tensor to the correct rank.
if start <= tensor_idx < end:
# this tensor does need to be materialized on this device:
return param[:]
else:
return torch.empty([], dtype=torch.int64, device=rank)
slice_indices = [slice(None)] * len(param.get_shape())
if start < param.get_shape()[dim]:
slice_indices[dim] = slice(start, end)
param = param[tuple(slice_indices)]
if isinstance(param, list): # TODO handle the modulelist case!
param = [p[:] for p in param]
return param
dimensions[dim] = 0
return torch.empty(tuple(dimensions), dtype=torch.int64) # empty allocates memory....
def distribute_module(
module: nn.Module,
device_mesh=None,
input_fn=None,
output_fn=None,
) -> nn.Module:
"""
Copy pasted from torch's function but we remove the communications (partitioning)
as well as buffer registering that is similarly not efficient.
"""
if len(module._forward_pre_hooks) == 0:
if input_fn is not None:
module.register_forward_pre_hook(lambda mod, inputs: input_fn(mod, inputs, device_mesh))
if output_fn is not None:
module.register_forward_hook(lambda mod, inputs, outputs: output_fn(mod, outputs, device_mesh))
return module
class TensorParallelLayer:
"""
General tensor parallel layer for transformers.
"""
use_dtensor = True
device_mesh = None
rank = None
# Used to compare the shape of the original tensor
empty_param = None
# Used to init the corresponding DTensor
shard = None
def __init__(self, device_mesh=None, rank=None, empty_param=None):
self.rank = rank
self.device_mesh = device_mesh
self.empty_param = empty_param
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh): ...
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh): ...
def shard_tensor(
self, param: torch.Tensor, tensor_idx: int | None = None, device=None, dtype=None
) -> torch.Tensor:
raise NotImplementedError
def partition_tensor(self, param: torch.Tensor, dtype, to_contiguous: bool):
raise NotImplementedError
def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module:
if self.use_dtensor:
distribute_module(
module,
device_mesh,
partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts),
partial(self._prepare_output_fn, self.output_layouts, self.use_local_output),
)
# use_dtensor needs to be set to False on nn.Parameter when you want to view, chunk, slice,
# you name it. For anything a bit unconventional like that, you need local tensors.
class GatherParallel(TensorParallelLayer):
"""
Simple class used to define the hooks to add to a layer when we just want to gather the outputs
"""
def __init__(
self,
input_layouts: Placement | None = None,
output_layouts: Placement | None = None,
use_local_output: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.input_layouts = (input_layouts or Replicate(),)
self.output_layouts = output_layouts
self.desired_input_layouts = (Replicate(),)
self.use_local_output = use_local_output
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):
mod.expert_parallel_group = device_mesh.get_group()
if inputs and isinstance(inputs[0], DTensor):
inputs = inputs[0].to_local()
return inputs
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
if isinstance(outputs, torch.Tensor):
dist.all_reduce(outputs, op=dist.ReduceOp.SUM, async_op=False)
else:
dist.all_reduce(outputs[0], op=dist.ReduceOp.SUM, async_op=False)
return outputs
def shard_tensor(
self, param: torch.Tensor, tensor_idx: int | None = None, device=None, dtype=None
) -> torch.Tensor:
self.shard = [Replicate()]
return param[...].to(device=device, dtype=dtype)
def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module:
distribute_module(
module,
device_mesh,
partial(self._prepare_input_fn, None, None),
partial(self._prepare_output_fn, None, None),
)
class IsolatedParallel(TensorParallelLayer):
"""
This class is used to isolate computation in a TP layer from the rest of the world.
Parameters need to be LOCAL, so not dtensors
"""
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh=None):
# annotate module input placements/sharding with input_layouts
input_tensor = inputs[0]
if isinstance(input_tensor, DTensor):
input_tensor = input_tensor.to_local()
return input_tensor
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh=None):
# TODO: figure out dynamo support for instance method and switch this to instance method
return outputs
def shard_tensor(
self, param: torch.Tensor, tensor_idx: int | None = None, device=None, dtype=None
) -> torch.Tensor:
parameter = param[...].to(device=device, dtype=dtype)
if self.device_mesh is not None:
parameter = parameter / self.device_mesh.size()
self.shard = None
return parameter
def partition_tensor(self, param: torch.Tensor, dtype, to_contiguous: bool):
parameter = self.shard_tensor(param, dtype=dtype)
if to_contiguous:
parameter = parameter.contiguous()
# TODO: assumes parent module will allreduce the output afterwards (e.g rowlinear bias is IsolatedParallel and parent module is GatherParallel)
return parameter
def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module:
distribute_module(
module,
device_mesh,
partial(self._prepare_input_fn, None, None),
partial(self._prepare_output_fn, None, None),
)
class ReplicateParallel(TensorParallelLayer):
"""
This class is used to replicate computation in a TP layer (for example, in place of sequence-parallel regions when sequence parallelism is not used)
"""
def __init__(self, use_dtensor=True, use_local_output=True, **kwargs):
super().__init__(**kwargs)
self.input_layouts = (Replicate(),)
self.output_layouts = (Replicate(),)
self.desired_input_layouts = (Replicate(),)
self.use_local_output = use_local_output
self.use_dtensor = use_dtensor
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):
# TODO: figure out dynamo support for instance method and switch this to instance method
# annotate module input placements/sharding with input_layouts
input_tensor = inputs[0]
if not isinstance(input_tensor, DTensor):
input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False)
return input_tensor
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
return outputs.to_local() if use_local_output and isinstance(outputs, DTensor) else outputs
def shard_tensor(
self, param: torch.Tensor, tensor_idx: int | None = None, device=None, dtype=None
) -> torch.Tensor:
self.shard = [Replicate()]
return param[...].to(device=device, dtype=dtype)
def partition_tensor(self, param: torch.Tensor, dtype, to_contiguous: bool):
parameter = self.shard_tensor(param, dtype=dtype)
if self.use_dtensor:
parameter = DTensor.from_local(parameter, self.device_mesh, self.shard, run_check=False)
return parameter
class ColwiseParallel(TensorParallelLayer):
"""
Column-wise tensor parallel layer: the weight is sharded along its output-feature dimension (Shard(-2) for a 2D weight), so each rank computes a slice of the output features.
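A minimal sketch of the resulting weight shapes (illustrative only, assuming a 1-D mesh of 4 ranks):
```py
>>> import torch.nn as nn
>>> layer = nn.Linear(8, 12)                   # weight shape (12, 8)
>>> layer.weight.chunk(4, dim=0)[0].shape      # per-rank colwise slice, i.e. Shard(-2) on the weight
torch.Size([3, 8])
```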
"""
def __init__(
self,
input_layouts: Placement | None = None,
output_layouts: Placement | None = None,
use_local_output: bool = True,
use_dtensor=True,
**kwargs,
):
super().__init__(**kwargs)
self.input_layouts = (input_layouts or Replicate(),)
self.output_layouts = (output_layouts or Shard(-1),)
self.desired_input_layouts = (Replicate(),)
self.use_local_output = use_local_output
self.use_dtensor = use_dtensor
@staticmethod
def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh):
# TODO: figure out dynamo support for instance method and switch this to instance method
# annotate module input placements/sharding with input_layouts
input_tensor = inputs[0]
if not isinstance(input_tensor, DTensor):
input_tensor = DTensor.from_local(input_tensor, device_mesh, input_layouts, run_check=False)
# transform the input layouts to the desired layouts of ColwiseParallel
if input_layouts != desired_input_layouts:
input_tensor = input_tensor.redistribute(placements=desired_input_layouts, async_op=False)
return input_tensor
def shard_tensor(
self, param: torch.Tensor, tensor_idx: int | None = None, device=None, dtype=None
) -> torch.Tensor:
# If only 1 dim, shard this one (usually it's a `bias`)
dim = param.dim() if isinstance(param, torch.Tensor) else len(param.get_shape())
if dim == 1:
parameter = get_tensor_shard(param, self.empty_param, self.device_mesh, self.rank, -1, tensor_idx)
shard = [Shard(-1)]
else:
shard = [Shard(-2)]
parameter = get_tensor_shard(param, self.empty_param, self.device_mesh, self.rank, -2, tensor_idx)
self.shard = shard
return parameter.to(device=device, dtype=dtype)
def partition_tensor(self, param: torch.Tensor, dtype, to_contiguous: bool):
# Colwise shards the weight along its output dimension: Shard(-2) for a 2D weight (Shard(0) if it only
# has 1 dim, e.g. a bias). Since Linear computes input * weight^T + bias, sharding the rows of `weight`
# corresponds to Shard(1) on the transposed weight, i.e. the output features are split across ranks.
parameter = self.shard_tensor(param, dtype=dtype)
if to_contiguous:
parameter = parameter.contiguous()
if self.use_dtensor:
parameter = DTensor.from_local(
parameter,
self.device_mesh,
self.shard,
run_check=False,
shape=self.empty_param.size(),
stride=self.empty_param.stride(),
)
return nn.Parameter(parameter, requires_grad=parameter.is_floating_point())
@staticmethod
def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh):
# outputs is a shard on last dimension DTensor, i.e. Shard(-1)
if outputs.placements != output_layouts:
outputs = outputs.redistribute(placements=output_layouts, async_op=False)
# back to local tensor
return outputs.to_local() if use_local_output and isinstance(outputs, DTensor) else outputs
class PackedColwiseParallel(ColwiseParallel):
def shard_tensor(
self, param: torch.Tensor, tensor_idx: int | None = None, device=None, dtype=None
) -> torch.Tensor:
parameter = get_packed_weights(param, self.empty_param, self.device_mesh, self.rank, -2)
return parameter.to(device=device, dtype=dtype)
def partition_tensor(self, param: torch.Tensor, dtype, to_contiguous: bool):
# Colwise shards the weight along its output dimension: Shard(-2) for a 2D weight (Shard(0) if it only
# has 1 dim, e.g. a bias). Since Linear computes input * weight^T + bias, sharding the rows of `weight`
# corresponds to Shard(1) on the transposed weight, i.e. the output features are split across ranks.
parameter = self.shard_tensor(param, dtype=dtype)
if to_contiguous:
parameter = parameter.contiguous()
if self.use_dtensor:
parameter = DTensor.from_local(parameter, self.device_mesh, [Shard(-2)], run_check=False)
return nn.Parameter(parameter, requires_grad=parameter.is_floating_point())
class LocalColwiseParallel(ColwiseParallel):
"""
Colwise parallel with use_dtensor=False for local tensor operations.
"""
def __init__(self, **kwargs):
super().__init__(use_dtensor=False, **kwargs)
class ColwiseParallelReplicate(ColwiseParallel):
"""
Colwise parallel with output layouts replicated.
"""
def __init__(self, **kwargs):
super().__init__(output_layouts=Replicate(), **kwargs)
class RowwiseParallel(TensorParallelLayer):
"""
Partition a compatible nn.Module in a row-wise fashion. Currently supports nn.Linear and nn.Embedding.
Users can compose it with ColwiseParallel to achieve the sharding of more complicated modules.
(i.e. MLP, Attention)
Keyword Args:
input_layouts (Placement, optional):
The DTensor layout of input tensor for the nn.Module, this is used to annotate the input tensor to
become a DTensor. If not specified, we assume the input tensor to be sharded on the last dimension.
output_layouts (Placement, optional):
The DTensor layout of the output for the nn.Module, this is used to ensure the output of the nn.Module
with the user desired layout. If not specified, the output tensor is replicated.
use_local_output (bool, optional):
Whether to use local :class:`torch.Tensor` instead of :class:`DTensor` for the module output, default: True.
Returns:
A :class:`ParallelStyle` object that represents Rowwise sharding of the nn.Module.
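A minimal sketch of the resulting weight shapes (illustrative only, assuming a 1-D mesh of 4 ranks):
```py
>>> import torch.nn as nn
>>> layer = nn.Linear(8, 12)                   # weight shape (12, 8)
>>> layer.weight.chunk(4, dim=1)[0].shape      # per-rank rowwise slice: the input dimension is sharded
torch.Size([12, 2])
```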
"""
def __init__(
self,
input_layouts: Placement | None = None,
output_layouts: Placement | None = None,
use_local_output: bool = True,
use_dtensor: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.input_layouts = (input_layouts or Shard(-1),)
self.output_layouts = (output_layouts or Replicate(),)
self.use_local_output = use_local_output
self.use_dtensor = use_dtensor
def shard_tensor(
self, param: torch.Tensor, tensor_idx: int | None = None, device=None, dtype=None
) -> torch.Tensor:
# If only 1 dim, it should not be sharded (usually it's a `bias`)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/peft.py | src/transformers/integrations/peft.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
from typing import Any, Literal
from ..conversion_mapping import get_model_conversion_mapping
from ..core_model_loading import WeightRenaming, rename_source_key
from ..utils import (
CONFIG_NAME,
cached_file,
check_peft_version,
extract_commit_hash,
find_adapter_config_file,
is_accelerate_available,
is_peft_available,
is_torch_available,
logging,
)
from ..utils.hub import DownloadKwargs
if is_torch_available():
import torch
if is_accelerate_available():
from accelerate import dispatch_model
from accelerate.utils import get_balanced_memory, infer_auto_device_map
# Minimum PEFT version supported for the integration
MIN_PEFT_VERSION = "0.18.0"
logger = logging.get_logger(__name__)
class PeftAdapterMixin:
"""
A class containing all functions for loading and using adapters weights that are supported in PEFT library. For
more details about adapters and injecting them on a transformer-based model, check out the documentation of PEFT
library: https://huggingface.co/docs/peft/index
Currently supported PEFT methods are all non-prompt learning methods (LoRA, IA³, etc.). Other PEFT methods such as
prompt tuning or prompt learning are out of scope, as these adapters are not "injectable" into a torch module. For
using these methods, please refer to the usage guide of the PEFT library.
With this mixin, if the correct PEFT version is installed (>= 0.18.0), it is possible to:
- Load an adapter stored on a local path or in a remote Hub repository, and inject it in the model
- Attach new adapters in the model and train them with Trainer or on your own.
- Attach multiple adapters and iteratively activate / deactivate them
- Activate / deactivate all adapters from the model.
- Get the `state_dict` of the active adapter.
"""
_hf_peft_config_loaded = False
_prepare_peft_hotswap_kwargs: dict | None = None
def load_adapter(
self,
peft_model_id: str | None = None,
adapter_name: str | None = None,
revision: str | None = None,
token: str | None = None,
device_map: str = "auto",
max_memory: str | None = None,
offload_folder: str | None = None,
offload_index: int | None = None,
peft_config: dict[str, Any] | None = None,
adapter_state_dict: dict[str, "torch.Tensor"] | None = None,
low_cpu_mem_usage: bool = False,
is_trainable: bool = False,
hotswap: bool | Literal["auto"] = "auto",
local_files_only: bool = False,
adapter_kwargs: dict[str, Any] | None = None,
) -> None:
"""
Load adapter weights from file or remote Hub folder. If you are not familiar with adapters and PEFT methods, we
invite you to read more about them on PEFT official documentation: https://huggingface.co/docs/peft
Requires PEFT to be installed as a backend to load the adapter weights.
Args:
peft_model_id (`str`, *optional*):
The identifier of the model to look for on the Hub, or a local path to the saved adapter config file
and adapter weights.
adapter_name (`str`, *optional*):
The adapter name to use. If not set, will use the name "default".
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
> [!TIP]
> To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
token (`str`, `optional`):
The authentication token to use to load the remote folder. Useful to load private repositories
that are on the Hugging Face Hub. You might need to run `hf auth login` and paste your token to
cache it.
device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
A map that specifies where each submodule should go. It doesn't need to be refined to each
parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
like `1`) on which the model will be allocated, the device map will map the entire model to this
device. Passing `device_map = 0` means put the whole model on GPU 0.
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
max_memory (`Dict`, *optional*):
A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
GPU and the available CPU RAM if unset.
offload_folder (`str` or `os.PathLike`, `optional`):
If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
offload_index (`int`, `optional`):
`offload_index` argument to be passed to `accelerate.dispatch_model` method.
peft_config (`dict[str, Any]`, *optional*):
The configuration of the adapter to add, supported adapters are all non-prompt learning configs (LoRA,
IA³, etc). This argument is used in case users directly pass PEFT state dicts.
adapter_state_dict (`dict[str, torch.Tensor]`, *optional*):
The state dict of the adapter to load. This argument is used in case users directly pass PEFT state
dicts.
low_cpu_mem_usage (`bool`, *optional*, defaults to `False`):
Reduce memory usage while loading the PEFT adapter. This should also speed up the loading process.
is_trainable (`bool`, *optional*, defaults to `False`):
Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be
used for inference.
hotswap (`"auto"` or `bool`, *optional*, defaults to `"auto"`):
Whether to substitute an existing (LoRA) adapter with the newly loaded adapter in-place. This means
that, instead of loading an additional adapter, this will take the existing adapter weights and replace
them with the weights of the new adapter. This can be faster and more memory efficient. However, the
main advantage of hotswapping is that when the model is compiled with torch.compile, loading the new
adapter does not require recompilation of the model. When using hotswapping, the passed `adapter_name`
should be the name of an already loaded adapter.
If the new adapter and the old adapter have different ranks and/or LoRA alphas (i.e. scaling), you need
to call an additional method before loading the adapter:
```py
model = AutoModel.from_pretrained(...)
max_rank = ... # the highest rank among all LoRAs that you want to load
# call *before* compiling and loading the LoRA adapter
model.enable_peft_hotswap(target_rank=max_rank)
model.load_adapter(file_name_1, adapter_name="default")
# optionally compile the model now
model = torch.compile(model, ...)
output_1 = model(...)
# now you can hotswap the 2nd adapter, use the same name as for the 1st
# hotswap is activated by default since enable_peft_hotswap was called
model.load_adapter(file_name_2, adapter_name="default")
output_2 = model(...)
```
By default, hotswap is disabled and requires passing `hotswap=True`. If you called
`enable_peft_hotswap` first, it is enabled. You can still manually disable it in that case by passing
`hotswap=False`.
Note that hotswapping comes with a couple of limitations documented here:
https://huggingface.co/docs/peft/main/en/package_reference/hotswap
adapter_kwargs (`dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the `from_pretrained` method of the adapter config and
`find_adapter_config_file` method.
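Example (a minimal sketch; the repository names below are illustrative placeholders):
```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("some-org/base-model")  # hypothetical base checkpoint
model.load_adapter("some-org/lora-adapter", adapter_name="my_adapter")  # hypothetical adapter repo
model.set_adapter("my_adapter")
```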
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
from peft import PeftType
if hotswap == "auto":
# if user called model.enable_peft_hotswap and this is not the first adapter, enable hotswap
hotswap_enabled = getattr(self, "_hotswap_enabled", False)
not_first_adapter = bool(self._hf_peft_config_loaded and (adapter_name in self.peft_config))
hotswap = hotswap_enabled and not_first_adapter
if hotswap:
if (not self._hf_peft_config_loaded) or (adapter_name not in self.peft_config):
raise ValueError(
"To hotswap an adapter, there must already be an existing adapter with the same adapter name."
)
if any(conf.peft_type != PeftType.LORA for conf in self.peft_config.values()):
raise ValueError("Hotswapping is currently only supported for LoRA, please set `hotswap=False`.")
key_mapping = adapter_kwargs.pop("key_mapping", None) if adapter_kwargs is not None else None
weight_conversions = get_model_conversion_mapping(self, key_mapping=key_mapping)
# peft only supports low_cpu_mem_usage starting from v0.13.0
peft_load_kwargs = {}
peft_load_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
adapter_name = adapter_name if adapter_name is not None else "default"
if adapter_kwargs is None:
adapter_kwargs = {}
from peft import PeftConfig, inject_adapter_in_model, load_peft_weights
from peft.utils import set_peft_model_state_dict
if self._hf_peft_config_loaded and (not hotswap) and (adapter_name in self.peft_config):
raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
elif hotswap and ((not self._hf_peft_config_loaded) or (adapter_name not in self.peft_config)):
raise ValueError(
"To hotswap an adapter, there must already be an existing adapter with the same adapter name."
)
if peft_model_id is None and (adapter_state_dict is None and peft_config is None):
raise ValueError(
"You should either pass a `peft_model_id` or a `peft_config` and `adapter_state_dict` to load an adapter."
)
if "device" not in adapter_kwargs:
device = self.device if not hasattr(self, "hf_device_map") else list(self.hf_device_map.values())[0]
else:
device = adapter_kwargs.pop("device")
# To avoid PEFT errors later on with safetensors.
if isinstance(device, torch.device):
device = str(device)
# We keep `revision` in the signature for backward compatibility
if revision is not None and "revision" not in adapter_kwargs:
adapter_kwargs["revision"] = revision
elif revision is not None and "revision" in adapter_kwargs and revision != adapter_kwargs["revision"]:
logger.error(
"You passed a `revision` argument both in `adapter_kwargs` and as a standalone argument. "
"The one in `adapter_kwargs` will be used."
)
# Override token with adapter_kwargs' token
if "token" in adapter_kwargs:
token = adapter_kwargs.pop("token")
if peft_config is None:
adapter_config_file = find_adapter_config_file(
peft_model_id,
token=token,
local_files_only=local_files_only,
**adapter_kwargs,
)
if adapter_config_file is None:
raise ValueError(
f"adapter model file not found in {peft_model_id}. Make sure you are passing the correct path to the "
"adapter model."
)
peft_config = PeftConfig.from_pretrained(
peft_model_id,
token=token,
local_files_only=local_files_only,
**adapter_kwargs,
)
peft_config.inference_mode = not is_trainable
if not hotswap:
# TODO: WE NEED TO APPLY OUR DYNAMIC WEIGHT CONVERSION AT SOME POINT HERE!
# Create and add fresh new adapters into the model, unless the weights are hotswapped
inject_adapter_in_model(peft_config, self, adapter_name, **peft_load_kwargs)
if not self._hf_peft_config_loaded:
self._hf_peft_config_loaded = True
if peft_model_id is not None:
if "local_files_only" not in adapter_kwargs:
adapter_kwargs["local_files_only"] = local_files_only
adapter_state_dict = load_peft_weights(peft_model_id, token=token, device=device, **adapter_kwargs)
# We need to pre-process the state dict to remove unneeded prefixes - for backward compatibility
renamings = []
if weight_conversions:
renamings = [entry for entry in weight_conversions if isinstance(entry, WeightRenaming)]
processed_adapter_state_dict = {}
prefix = "base_model.model."
state_dict = self.state_dict()
for key, value in adapter_state_dict.items():
if key.startswith(prefix):
new_key = key[len(prefix) :]
else:
new_key = key
new_key = rename_source_key(new_key, renamings, [], self.base_model_prefix, state_dict)[0]
# For hotswapping, we need the adapter name to be present in the state dict keys
if hotswap:
if key.endswith("lora_A.weight") or key.endswith("lora_B.weight"):
new_key = new_key[: -len(".weight")] + f".{adapter_name}.weight"
elif key.endswith("lora_B.bias"): # lora_bias=True option
new_key = new_key[: -len(".bias")] + f".{adapter_name}.bias"
processed_adapter_state_dict[new_key] = value
# Load state dict
if not hotswap:
incompatible_keys = set_peft_model_state_dict(
self, processed_adapter_state_dict, adapter_name, **peft_load_kwargs
)
if self._prepare_peft_hotswap_kwargs is not None:
# For hotswapping of compiled models or adapters with different ranks.
# If the user called enable_peft_hotswap, we need to ensure it is called:
# - after the first adapter was loaded
# - before the model is compiled and the 2nd adapter is being hotswapped in
# Therefore, it needs to be called here
from peft.utils.hotswap import prepare_model_for_compiled_hotswap
prepare_model_for_compiled_hotswap(self, config=peft_config, **self._prepare_peft_hotswap_kwargs)
# We only want to call prepare_model_for_compiled_hotswap once
self._prepare_peft_hotswap_kwargs = None
else:
from peft.utils.hotswap import check_hotswap_configs_compatible, hotswap_adapter_from_state_dict
check_hotswap_configs_compatible(self.peft_config[adapter_name], peft_config)
try:
hotswap_adapter_from_state_dict(
model=self,
state_dict=processed_adapter_state_dict,
adapter_name=adapter_name,
config=peft_config,
)
except Exception as e:
logger.error(f"Hotswapping {adapter_name} was unsucessful with the following error: \n{e}")
raise
incompatible_keys = None
if incompatible_keys is not None:
err_msg = ""
origin_name = peft_model_id if peft_model_id is not None else "state_dict"
# Check for unexpected keys.
if hasattr(incompatible_keys, "unexpected_keys") and len(incompatible_keys.unexpected_keys) > 0:
err_msg = (
f"Loading adapter weights from {origin_name} led to unexpected keys not found in the model: "
f"{', '.join(incompatible_keys.unexpected_keys)}. "
)
# Check for missing keys.
missing_keys = getattr(incompatible_keys, "missing_keys", None)
if missing_keys:
# Filter missing keys specific to the current adapter, as missing base model keys are expected.
lora_missing_keys = [k for k in missing_keys if "lora_" in k and adapter_name in k]
if lora_missing_keys:
err_msg += (
f"Loading adapter weights from {origin_name} led to missing keys in the model: "
f"{', '.join(lora_missing_keys)}"
)
if err_msg:
logger.warning(err_msg)
if peft_config.inference_mode:
self.eval()
# Re-dispatch model and hooks in case the model is offloaded to CPU / Disk.
if (
(getattr(self, "hf_device_map", None) is not None)
and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
and len(self.peft_config) == 1
):
self._dispatch_accelerate_model(
device_map=device_map,
max_memory=max_memory,
offload_folder=offload_folder,
offload_index=offload_index,
)
def enable_peft_hotswap(
self, target_rank: int = 128, check_compiled: Literal["error", "warn", "ignore"] = "error"
) -> None:
"""Enables the possibility to hotswap PEFT adapters with different ranks, or, if the model is compiled, without
triggering recompilation.
Right now, hotswapping is only supported for LoRA.
Calling this method is only required when hotswapping adapters and if the model is compiled or if the ranks of
the loaded adapters differ. If the ranks are all identical and the model is not compiled, hotswapping works
without calling this method first.
Args:
target_rank (`int`, *optional*, defaults to `128`):
The highest rank among all the adapters that will be loaded.
check_compiled (`str`, *optional*, defaults to `"error"`):
How to handle the case when the model is already compiled, which should generally be avoided. The
options are:
- "error" (default): raise an error
- "warn": issue a warning
- "ignore": do nothing
"""
if getattr(self, "peft_config", {}):
if check_compiled == "error":
raise RuntimeError("Call `enable_peft_hotswap` before loading the first adapter.")
elif check_compiled == "warn":
logger.warning(
"It is recommended to call `enable_peft_hotswap` before loading the first adapter to avoid recompilation."
)
elif check_compiled != "ignore":
raise ValueError(
f"check_compiles should be one of 'error', 'warn', or 'ignore', got '{check_compiled}' instead."
)
self._hotswap_enabled = True
self._prepare_peft_hotswap_kwargs = {"target_rank": target_rank, "check_compiled": check_compiled}
def add_adapter(self, adapter_config, adapter_name: str | None = None) -> None:
r"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Adds a fresh new adapter to the current model for training purpose. If no adapter name is passed, a default
name is assigned to the adapter to follow the convention of PEFT library (in PEFT we use "default" as the
default adapter name).
Note that the newly added adapter is not automatically activated. To activate it, use `model.set_adapter`.
Args:
adapter_config (`~peft.PeftConfig`):
The configuration of the adapter to add, supported adapters are non-prompt learning methods (LoRA,
IA³, etc.).
adapter_name (`str`, *optional*, defaults to `"default"`):
The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.
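Example (a minimal sketch using a LoRA config; the hyperparameters are illustrative):
```py
from peft import LoraConfig

model.add_adapter(LoraConfig(r=8, lora_alpha=16), adapter_name="my_adapter")
model.set_adapter("my_adapter")
```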
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
from peft import PeftConfig, inject_adapter_in_model
adapter_name = adapter_name or "default"
if not self._hf_peft_config_loaded:
self._hf_peft_config_loaded = True
elif adapter_name in self.peft_config:
raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
if not isinstance(adapter_config, PeftConfig):
raise TypeError(f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.")
# Retrieve the name or path of the model, one could also use self.config._name_or_path
# but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100
adapter_config.base_model_name_or_path = self.__dict__.get("name_or_path", None)
# TODO: WE NEED TO APPLY OUR DYNAMIC WEIGHT CONVERSION AT SOME POINT HERE!
inject_adapter_in_model(adapter_config, self, adapter_name)
self.set_adapter(adapter_name)
def set_adapter(self, adapter_name: list[str] | str) -> None:
"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Sets a specific adapter by forcing the model to use that adapter and disabling the other adapters.
Args:
adapter_name (`Union[list[str], str]`):
The name of the adapter to set. Can be also a list of strings to set multiple adapters.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
elif isinstance(adapter_name, list):
missing = set(adapter_name) - set(self.peft_config)
if len(missing) > 0:
raise ValueError(
f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the correct adapter name(s)."
f" current loaded adapters are: {list(self.peft_config.keys())}"
)
elif adapter_name not in self.peft_config:
raise ValueError(
f"Adapter with name {adapter_name} not found. Please pass the correct adapter name among {list(self.peft_config.keys())}"
)
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import ModulesToSaveWrapper
_adapters_has_been_set = False
for _, module in self.named_modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.set_adapter(adapter_name)
_adapters_has_been_set = True
if not _adapters_has_been_set:
raise ValueError(
"Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters."
)
def disable_adapters(self) -> None:
r"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Disable all adapters that are attached to the model. This leads to inferring with the base model only.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import ModulesToSaveWrapper
for _, module in self.named_modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled=False)
def enable_adapters(self) -> None:
"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Enable adapters that are attached to the model.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft.tuners.tuners_utils import BaseTunerLayer
for _, module in self.named_modules():
if isinstance(module, BaseTunerLayer):
module.enable_adapters(enabled=True)
def active_adapters(self) -> list[str]:
"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Gets the current active adapters of the model. In case of multi-adapter inference (combining multiple adapters
for inference) returns the list of all active adapters so that users can deal with them accordingly.
For previous PEFT versions (that do not support multi-adapter inference), `module.active_adapter` will return
a single string.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft.tuners.tuners_utils import BaseTunerLayer
for _, module in self.named_modules():
if isinstance(module, BaseTunerLayer):
active_adapters = module.active_adapter
break
# For previous PEFT versions
if isinstance(active_adapters, str):
active_adapters = [active_adapters]
return active_adapters
def get_adapter_state_dict(self, adapter_name: str | None = None, state_dict: dict | None = None) -> dict:
"""
If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
official documentation: https://huggingface.co/docs/peft
Gets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter.
If no adapter_name is passed, the active adapter is used.
Args:
adapter_name (`str`, *optional*):
The name of the adapter to get the state dict from. If no name is passed, the active adapter is used.
state_dict (nested dictionary of `torch.Tensor`, *optional*):
The state dictionary of the model. Will default to `self.state_dict()`, but can be used if special
precautions need to be taken when recovering the state dictionary of a model (like when using model
parallelism).
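Example (assuming an adapter named `"default"` has already been loaded):
```py
adapter_state_dict = model.get_adapter_state_dict(adapter_name="default")
```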
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft import get_peft_model_state_dict
if adapter_name is None:
adapter_name = self.active_adapters()[0]
adapter_state_dict = get_peft_model_state_dict(self, state_dict=state_dict, adapter_name=adapter_name)
return adapter_state_dict
def _dispatch_accelerate_model(
self,
device_map: str,
max_memory: int | None = None,
offload_folder: str | None = None,
offload_index: int | None = None,
) -> None:
"""
Optionally re-dispatches the model and attaches new hooks to the model in case the model has been loaded with
accelerate (i.e. with `device_map=xxx`)
Args:
device_map (`str` or `dict[str, Union[int, str, torch.device]]` or `int` or `torch.device`, *optional*):
A map that specifies where each submodule should go. It doesn't need to be refined to each
parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
same device. If we only pass the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank
like `1`) on which the model will be allocated, the device map will map the entire model to this
device. Passing `device_map = 0` means put the whole model on GPU 0.
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
more information about each option see [designing a device
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
max_memory (`Dict`, *optional*):
A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
GPU and the available CPU RAM if unset.
offload_folder (`str` or `os.PathLike`, *optional*):
If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
offload_index (`int`, *optional*):
The offload_index argument to be passed to `accelerate.dispatch_model` method.
"""
dispatch_model_kwargs = {}
# Safety checker for previous `accelerate` versions
# `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
if "offload_index" in inspect.signature(dispatch_model).parameters:
dispatch_model_kwargs["offload_index"] = offload_index
no_split_module_classes = self._no_split_modules
if device_map != "sequential":
max_memory = get_balanced_memory(
self,
max_memory=max_memory,
no_split_module_classes=no_split_module_classes,
low_zero=(device_map == "balanced_low_0"),
)
if isinstance(device_map, str):
device_map = infer_auto_device_map(
self, max_memory=max_memory, no_split_module_classes=no_split_module_classes
)
dispatch_model(
self,
device_map=device_map,
offload_dir=offload_folder,
**dispatch_model_kwargs,
)
def delete_adapter(self, adapter_names: list[str] | str) -> None:
"""
Delete a PEFT adapter from the underlying model.
Args:
adapter_names (`Union[list[str], str]`):
The name(s) of the adapter(s) to delete.
"""
check_peft_version(min_version=MIN_PEFT_VERSION)
if not self._hf_peft_config_loaded:
raise ValueError("No adapter loaded. Please load an adapter first.")
from peft.functional import delete_adapter
if isinstance(adapter_names, str):
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/fbgemm_fp8.py | src/transformers/integrations/fbgemm_fp8.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import lru_cache
from typing import Optional
from ..activations import ACT2FN
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name, should_convert_module
from ..utils import (
is_accelerate_available,
is_fbgemm_gpu_available,
is_torch_available,
is_torch_xpu_available,
logging,
)
if is_torch_available():
import torch
from torch import nn
if is_accelerate_available():
from accelerate import init_empty_weights
_is_torch_xpu_available = is_torch_xpu_available()
if is_fbgemm_gpu_available() and not _is_torch_xpu_available:
import fbgemm_gpu.experimental.gen_ai # noqa: F401
logger = logging.get_logger(__name__)
class FbgemmFp8Quantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor | list[torch.Tensor]],
model: Optional[torch.nn.Module] = None,
**kwargs,
) -> dict[str, torch.Tensor]:
target_key, value = tuple(input_dict.items())[0]
value = value[0]
from ..integrations import FbgemmFp8Llama4TextExperts
module, tensor_name = get_module_from_name(model, target_key)
if isinstance(module, FbgemmFp8Llama4TextExperts):
if tensor_name == "gate_up_proj":
# Process each expert separately
# Transpose the second and third dimension
transposed_param = value.transpose(1, 2)
# Reshape to 2D for quantization
original_shape = transposed_param.shape
flattened_param = transposed_param.reshape(-1, original_shape[-1])
# Quantize using per row instead of per column
new_value_flat, weight_scale_flat = quantize_fp8_per_row(flattened_param)
# Reshape back to original dimensions
new_value = new_value_flat.reshape(original_shape)
new_value = new_value.transpose(1, 2)
weight_scale = weight_scale_flat.reshape(original_shape[0], 1, original_shape[1])
elif tensor_name == "down_proj":
# Process each expert separately
# Transpose the weights for proper quantization
transposed_param = value.transpose(1, 2)
# Reshape to 2D for quantization
original_shape = transposed_param.shape
flattened_param = transposed_param.reshape(-1, original_shape[-1])
# Quantize using per column
new_value_flat, weight_scale_flat = quantize_fp8_per_row(flattened_param)
# Reshape back to original dimensions
new_value = new_value_flat.reshape(original_shape)
new_value = new_value.transpose(1, 2)
weight_scale = weight_scale_flat.reshape(original_shape[0], original_shape[1], 1)
else:
new_value, weight_scale = quantize_fp8_per_row(value)
weight_scale = torch.nn.Parameter(weight_scale.view(weight_scale.shape[0], 1))
return {target_key: torch.nn.Parameter(new_value), f"{target_key}_scale": weight_scale}
class FbgemmFp8Linear(torch.nn.Linear):
def __init__(self, in_features, out_features, bias, dtype=torch.float8_e4m3fn):
super().__init__(in_features, out_features, bias)
self.in_features = in_features
self.out_features = out_features
self.weight = torch.nn.Parameter(torch.zeros((out_features, in_features), dtype=dtype))
self.weight_scale = torch.nn.Parameter(torch.zeros((out_features, 1), dtype=torch.float32))
self.register_buffer("input_scale_ub", torch.zeros([1], dtype=torch.float), persistent=False)
if bias:
self.bias = torch.nn.Parameter(torch.zeros((self.out_features), dtype=torch.float32))
else:
self.bias = None
def forward(self, x):
# quantize_fp8_per_row will squash the leading dimensions, so save the desired shape here
output_shape = (*x.shape[:-1], -1)
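# e.g. an input of shape (batch, seq_len, in_features) is flattened below to (batch * seq_len, in_features)
# before per-row quantization; `output_shape` is used to restore the leading dimensions at the end.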
# x_quantized and x_scale are not necessarily on the same device as x, this is an issue.
# https://github.com/pytorch/FBGEMM/blob/e08af8539c391437f447173863df0f3f6f6f1855/fbgemm_gpu/experimental/gen_ai/src/quantize/quantize.cu#L1237C3-L1237C45
x_quantized, x_scale = quantize_fp8_per_row(x.view(-1, x.shape[-1]).contiguous(), scale_ub=self.input_scale_ub)
# moving x_quantized, x_scale here creates gibberish output ... However, if we move the output, it works
# x_quantized, x_scale = x_quantized.to(x.device), x_scale.to(x.device)
# The computation still happens on the device where self.weight is even if x_quantized is not on the same device as self.weight
weight_scale_float32 = self.weight_scale.to(torch.float32)
if _is_torch_xpu_available:
output = torch._scaled_mm(
x_quantized,
self.weight.t(),
scale_a=x_scale.unsqueeze(-1),
scale_b=weight_scale_float32.t(),
out_dtype=x.dtype,
bias=self.bias,
)
else:
output = torch.ops.fbgemm.f8f8bf16_rowwise(
x_quantized, self.weight, x_scale, weight_scale_float32, use_fast_accum=True
)
output = output + self.bias if self.bias is not None else output
# Hacky for now: we move the output to the device of x
output = output.to(x.device)
output = output.reshape(output_shape)
del x_quantized, x_scale
return output
class FbgemmFp8Llama4TextExperts(nn.Module):
def __init__(self, config, dtype=torch.float32):
super().__init__()
self.num_experts = config.num_local_experts
self.intermediate_size = config.intermediate_size
self.hidden_size = config.hidden_size
self.expert_dim = self.intermediate_size
self.act_fn = ACT2FN[config.hidden_act]
# Register FP8 buffers for gate_up_proj
self.gate_up_proj = torch.nn.Parameter(
torch.zeros((self.num_experts, self.hidden_size, 2 * self.expert_dim), dtype=torch.float8_e4m3fn)
)
self.gate_up_proj_scale = torch.nn.Parameter(
torch.zeros((self.num_experts, 1, self.expert_dim * 2), dtype=torch.float32)
)
# Register FP8 buffers for down_proj
self.down_proj = torch.nn.Parameter(
torch.zeros((self.num_experts, self.expert_dim, self.hidden_size), dtype=torch.float8_e4m3fn)
)
self.down_proj_scale = torch.nn.Parameter(
torch.zeros((self.num_experts, self.hidden_size, 1), dtype=torch.float32)
)
# Register input scale upper bound
self.register_buffer("input_scale_ub", torch.zeros([1], dtype=torch.float), persistent=False)
def forward(self, hidden_states):
"""
Args:
hidden_states (torch.Tensor): (batch_size * token_num, hidden_size)
Returns:
torch.Tensor: (batch_size * token_num, hidden_size)
"""
# Reshape hidden states for expert computation
hidden_states = hidden_states.view(self.num_experts, -1, self.hidden_size)
num_tokens = None
# Pre-allocate tensor for all expert outputs with same shape as hidden_states
next_states = torch.empty_like(hidden_states)
for i in range(self.num_experts):
# Extract expert's hidden states
expert_hidden = hidden_states[i]
expert_hidden_reshaped = expert_hidden.reshape(-1, self.hidden_size)
# Quantize for this expert
expert_quantized, expert_scale = quantize_fp8_per_row(
expert_hidden_reshaped, num_tokens, self.input_scale_ub
)
sharded_expert_dim = self.gate_up_proj.shape[-1] // 2
gate_up_proj_scale_float32 = self.gate_up_proj_scale.to(torch.float32)
if _is_torch_xpu_available:
gate = torch._scaled_mm(
expert_quantized,
self.gate_up_proj[i].transpose(0, 1)[:sharded_expert_dim].contiguous().t(),
scale_a=expert_scale.unsqueeze(-1),
scale_b=gate_up_proj_scale_float32[i][0][:sharded_expert_dim].view(-1, 1).contiguous().t(),
out_dtype=hidden_states.dtype,
)
up = torch._scaled_mm(
expert_quantized,
self.gate_up_proj[i].transpose(0, 1)[sharded_expert_dim:].contiguous().t(),
scale_a=expert_scale.unsqueeze(-1),
scale_b=gate_up_proj_scale_float32[i][0][sharded_expert_dim:].view(-1, 1).contiguous().t(),
out_dtype=hidden_states.dtype,
)
else:
gate = torch.ops.fbgemm.f8f8bf16_rowwise(
expert_quantized,
self.gate_up_proj[i].transpose(0, 1)[:sharded_expert_dim].contiguous(),
expert_scale,
gate_up_proj_scale_float32[i][0][:sharded_expert_dim].view(-1, 1).contiguous(),
use_fast_accum=True,
)
up = torch.ops.fbgemm.f8f8bf16_rowwise(
expert_quantized,
self.gate_up_proj[i].transpose(0, 1)[sharded_expert_dim:].contiguous(),
expert_scale,
gate_up_proj_scale_float32[i][0][sharded_expert_dim:].view(-1, 1).contiguous(),
use_fast_accum=True,
)
activated = up * self.act_fn(gate)
activated_quantized, activated_scale = quantize_fp8_per_row(activated, num_tokens, self.input_scale_ub)
down_proj_scale_float32 = self.down_proj_scale.to(torch.float32)
if _is_torch_xpu_available:
expert_output = torch._scaled_mm(
activated_quantized,
self.down_proj[i].transpose(0, 1).contiguous(),
scale_a=activated_scale.unsqueeze(-1),
scale_b=down_proj_scale_float32[i].view(-1, 1).contiguous().t(),
out_dtype=hidden_states.dtype,
)
else:
expert_output = torch.ops.fbgemm.f8f8bf16_rowwise(
activated_quantized,
self.down_proj[i].transpose(0, 1).contiguous(),
activated_scale,
down_proj_scale_float32[i].view(-1, 1).contiguous(),
use_fast_accum=True,
)
next_states[i] = expert_output
next_states = next_states.to(hidden_states.device)
return next_states.view(-1, self.hidden_size)
@lru_cache(maxsize=1)
def get_quantize_fp8_per_row():
if _is_torch_xpu_available:
from .hub_kernels import get_kernel
return get_kernel("kernels-community/fp8-fbgemm").quantize_fp8_per_row
return torch.ops.fbgemm.quantize_fp8_per_row
def replace_with_fbgemm_fp8_linear(
model, modules_to_not_convert: list[str] | None = None, quantization_config=None, pre_quantized=False, tp_plan=None
):
"""
A helper function to replace all `torch.nn.Linear` modules by `FbgemmFp8Linear` modules.
This enables running your models using the high-performance fp8 kernels from the FBGEMM library.
Parameters:
model (`torch.nn.Module`):
Input model or `torch.nn.Module` as the function is run recursively.
modules_to_not_convert (`list[`str`]`, *optional*, defaults to `None`):
Names of the modules to not convert. In practice we keep the `lm_head` in full precision for numerical stability reasons.
quantization_config (`FbgemmFp8Config`):
The quantization config object that contains the quantization parameters.
pre_quantized (`bool`, defaults to `False`):
Whether the model is pre-quantized or not
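Example (how this replacement is typically triggered from user code; the model name is an illustrative placeholder):
```py
from transformers import AutoModelForCausalLM, FbgemmFp8Config

quantization_config = FbgemmFp8Config()
model = AutoModelForCausalLM.from_pretrained("some-org/some-model", quantization_config=quantization_config)
```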
"""
global quantize_fp8_per_row
quantize_fp8_per_row = get_quantize_fp8_per_row()
has_been_replaced = False
module_kwargs = {} if pre_quantized else {"dtype": None}
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
new_module = None
with init_empty_weights(include_buffers=True):
if module.__class__.__name__ == "Llama4TextExperts":
# TODO: make sure tp works later
# if tp_plan is not None:
# tp_key = re.sub(r"\d+", "*", f"{module_name}.down_proj_scale")
# tp_plan[tp_key] = None
text_config = getattr(model.config, "text_config", model.config)
new_module = FbgemmFp8Llama4TextExperts(text_config or model.config)
elif isinstance(module, nn.Linear):
new_module = FbgemmFp8Linear(
module.in_features,
module.out_features,
module.bias is not None,
**module_kwargs,
)
new_module.requires_grad_(False)
if new_module is None:
continue
if hasattr(new_module, "input_scale_ub"):
new_module.input_scale_ub = torch.tensor(
[quantization_config.activation_scale_ub],
dtype=torch.float,
)
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using FP8 quantization but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/executorch.py | src/transformers/integrations/executorch.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import logging
import torch
from ..cache_utils import (
DynamicCache,
DynamicLayer,
DynamicSlidingWindowLayer,
EncoderDecoderCache,
StaticCache,
)
from ..generation.configuration_utils import GenerationConfig
from ..modeling_utils import PreTrainedModel
from ..pytorch_utils import (
is_torch_greater_or_equal,
is_torch_greater_or_equal_than_2_3,
is_torch_greater_or_equal_than_2_6,
)
class TorchExportableModuleForVLM:
"""
A wrapper class for exporting Vision-Language Models (VLMs) like SmolVLM2 for ExecuTorch.
This class handles the export of three main components:
1. Vision encoder (processes images to visual features)
2. Connector/projector (maps visual features to text embedding space)
3. Text decoder (generates text from combined visual and text tokens)
"""
def __init__(self, model, max_batch_size: int = 1, max_cache_len: int = 1024):
"""
Initialize the exportable VLM module.
Args:
model: The VLM (e.g. SmolVLM) model instance
max_batch_size: Maximum batch size. Always 1 for ExecuTorch
max_cache_len: Maximum cache length for text generation
"""
self.model = model
self.max_batch_size = max_batch_size
self.max_cache_len = max_cache_len
self.config = model.config
# Extract individual components
self.vision_encoder = model.model.vision_model
self.connector = model.model.connector
self.text_decoder = model.model.text_model
# Store exported programs
self.exported_vision_encoder = None
self.exported_connector = None
self.exported_text_decoder = None
def export_vision_encoder(self):
"""Export the vision encoder component."""
self.vision_encoder.eval()
# Create example input
pixel_values = torch.randn(1, 3, 384, 384, dtype=torch.float32)
# Define dynamic shapes
dynamic_shapes = {
"pixel_values": {
2: torch.export.Dim.AUTO,
3: torch.export.Dim.AUTO,
}
}
self.exported_vision_encoder = torch.export.export(
self.vision_encoder,
args=(pixel_values,),
dynamic_shapes=dynamic_shapes,
strict=False,
)
return self.exported_vision_encoder
def export_connector(self):
"""Export the connector component."""
self.connector.eval()
# Vision encoder output shape: [batch_size, num_patches, vision_hidden_size]
vision_hidden_size = self.config.vision_config.hidden_size
image_size = self.config.vision_config.image_size
patch_size = self.config.vision_config.patch_size
patches_per_dim = image_size // patch_size
num_patches = patches_per_dim * patches_per_dim
image_hidden_states = torch.randn(1, num_patches, vision_hidden_size, dtype=torch.float32)
# Define dynamic shapes - static batch_size=1, dynamic num_patches
dynamic_shapes = {"image_hidden_states": {1: torch.export.Dim.AUTO}}
# Export the connector using torch.export
self.exported_connector = torch.export.export(
self.connector,
args=(image_hidden_states,),
dynamic_shapes=dynamic_shapes,
strict=False,
)
return self.exported_connector
def export_text_decoder(self):
"""Export the text decoder component."""
# Create text decoder exportable wrapper
self.exportable_text_decoder = TorchExportableModuleForDecoderOnlyLM(model=self.text_decoder)
# Use the existing text decoder exportable wrapper
seq_length = 3
input_ids = torch.zeros((1, seq_length), dtype=torch.long)
cache_position = torch.arange(seq_length, dtype=torch.long)
max_seq_length = min(self.max_cache_len, self.config.text_config.max_position_embeddings)
seq_len_dim = torch.export.Dim("seq_length_dim", max=max_seq_length - 1)
dynamic_shapes = {
"input_ids": {1: seq_len_dim},
"cache_position": {0: seq_len_dim},
}
self.exported_text_decoder = self.exportable_text_decoder.export(
input_ids=input_ids,
cache_position=cache_position,
dynamic_shapes=dynamic_shapes,
strict=False,
)
return self.exported_text_decoder
def export(self, **kwargs):
"""Export all components of the VLM model."""
self.export_vision_encoder(**kwargs)
self.export_connector(**kwargs)
self.export_text_decoder(**kwargs)
return {
"vision_encoder": self.exported_vision_encoder,
"connector": self.exported_connector,
"text_decoder": self.exported_text_decoder,
}
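    # Editor's sketch (not part of the original file): rough usage of this wrapper. The checkpoint
    # name and loading details are assumptions, not taken from this module.
    #
    #     model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-256M-Video-Instruct")
    #     exportable = TorchExportableModuleForVLM(model, max_cache_len=1024)
    #     programs = exportable.export()
    #     # programs["vision_encoder"], programs["connector"] and programs["text_decoder"] are
    #     # torch.export.ExportedProgram objects ready for further lowering to ExecuTorch.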
def forward(self, pixel_values, input_ids, cache_position):
"""
Simplified forward pass for inference with guaranteed non-null input_ids and cache_position.
Args:
pixel_values: Input images [1, channels, height, width] (optional)
input_ids: Text token IDs [1, seq_len] (required - won't be None)
cache_position: Cache positions [seq_len] (required - won't be None)
Returns:
Output with logits for text generation
"""
def generate(
self, pixel_values=None, input_ids=None, max_new_tokens=50, do_sample=False, temperature=1.0, **kwargs
):
"""
Simplified generate method with guaranteed non-null input_ids.
Args:
pixel_values: Input images [1, channels, height, width] (optional)
input_ids: Initial text tokens [1, seq_len] (required - won't be None)
max_new_tokens: Maximum number of tokens to generate
do_sample: Whether to use sampling or greedy decoding
temperature: Temperature for sampling
Returns:
Generated sequences
"""
class TorchExportableModuleForDecoderOnlyLM(torch.nn.Module):
"""
A recipe module designed to make a `PreTrainedModel` exportable with `torch.export`,
specifically for decoder-only LM with cache. This module ensures that the
exported model is compatible with further lowering and execution in `ExecuTorch`.
"""
def __init__(
self,
model: PreTrainedModel,
batch_size: int | None = None,
max_cache_len: int | None = None,
device: torch.device | None = None,
) -> None:
"""
Initializes the exportable module.
Args:
model (`PreTrainedModel`): The pretrained model to wrap.
Raises:
ValueError: If the model is configured with an unsupported cache implementation.
"""
super().__init__()
config = model.config.get_text_config()
if not hasattr(config, "use_cache") or config.use_cache is False:
raise ValueError("The model must have caching enabled to be performant.")
if hasattr(config, "layer_types") and getattr(config, "sliding_window", None) is not None:
self.model = TorchExportableModuleWithHybridCache(model, batch_size, max_cache_len, device)
else:
# If `layer_types` is not specified explicitly in the config or `sliding_window` is null,
# there is only 1 type of layers, so export will use `StaticCache` by default.
logging.info(
"Using `StaticCache` for export as `layer_types` is not specified or `sliding_window` is `null` in the config."
)
self.model = TorchExportableModuleWithStaticCache(model, batch_size, max_cache_len, device)
def forward(
self,
input_ids: torch.Tensor | None = None,
inputs_embeds: torch.Tensor | None = None,
cache_position: torch.Tensor | None = None,
) -> torch.Tensor:
"""
Forward pass of the module, which is compatible with the ExecuTorch llm runner.
Args:
input_ids (`torch.Tensor`): Tensor representing current input token id to the module.
inputs_embeds (`torch.Tensor`): Tensor representing current input embeddings to the module.
cache_position (`torch.Tensor`): Tensor representing current input position in the cache.
Returns:
torch.Tensor: Logits output from the model.
"""
return self.model.forward(
input_ids=input_ids,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
)
def export(
self,
input_ids: torch.Tensor | None = None,
inputs_embeds: torch.Tensor | None = None,
cache_position: torch.Tensor | None = None,
dynamic_shapes: dict | None = None,
strict: bool | None = None,
) -> torch.export.ExportedProgram:
"""
Export the wrapped module using `torch.export`.
Args:
input_ids (`Optional[torch.Tensor]`):
Tensor representing current input token id to the module. Must specify either this or inputs_embeds.
inputs_embeds (`Optional[torch.Tensor]`):
Tensor representing current input embeddings to the module. Must specify either this or input_ids.
cache_position (`Optional[torch.Tensor]`):
Tensor representing current input position in the cache. If not provided, a default tensor will be used.
dynamic_shapes (`Optional[dict]`):
Dynamic shapes to use for export if specified.
strict(`Optional[bool]`):
Flag to instruct `torch.export` to use `torchdynamo`.
Returns:
torch.export.ExportedProgram: The exported program that can be used for inference.
Examples:
Export with input_ids:
```python
# Prepare inputs
input_ids = torch.tensor([[1, 2, 3]], dtype=torch.long, device=model.device)
cache_position = torch.arange(input_ids.shape[-1], dtype=torch.long, device=model.device)
# Export
exported = exportable_module.export(
input_ids=input_ids,
cache_position=cache_position
)
```
Export with inputs_embeds:
```python
# Prepare embeddings
inputs_embeds = torch.randn(1, 3, 768, device=model.device) # batch_size=1, seq_len=3, hidden_size=768
cache_position = torch.arange(inputs_embeds.shape[1], dtype=torch.long, device=model.device)
# Export
exported = exportable_module.export(
inputs_embeds=inputs_embeds,
cache_position=cache_position
)
```
"""
if not (input_ids is None) ^ (inputs_embeds is None):
raise ValueError("Need to specify either input_ids or inputs_embeds.")
if hasattr(self.model, "base_model_prefix"):
base = getattr(self.model, self.model.base_model_prefix, self.model)
model_device = base.device
elif hasattr(self.model, "model"):
model_device = self.model.model.device
else:
model_device = "cpu"
logging.warning(
"TorchExportableModuleForDecoderOnlyLM.export Can't infer device from the model. Set to CPU by default."
)
if input_ids is not None:
input_kwargs = {
"input_ids": input_ids,
"cache_position": cache_position
if cache_position is not None
else torch.arange(input_ids.shape[-1], dtype=torch.long, device=model_device),
}
else: # inputs_embeds
input_kwargs = {
"inputs_embeds": inputs_embeds,
"cache_position": cache_position
if cache_position is not None
else torch.arange(inputs_embeds.shape[1], dtype=torch.long, device=model_device),
}
exported_program = torch.export.export(
self.model,
args=(),
kwargs=input_kwargs,
dynamic_shapes=dynamic_shapes,
strict=strict if strict is not None else True,
)
return exported_program
@staticmethod
def generate(
exported_program: torch.export.ExportedProgram,
tokenizer,
prompt: str,
max_new_tokens: int = 20,
do_sample: bool = False,
temperature: float = 1.0,
top_k: int = 50,
top_p: float = 1.0,
device: str = "cpu",
) -> str:
"""
Generate a sequence of tokens using an exported program.
Args:
exported_program (`torch.export.ExportedProgram`): The exported model being used for generate.
tokenizer: The tokenizer to use.
prompt (str): The input prompt.
max_new_tokens (int): Maximum number of new tokens to generate.
do_sample (bool): Whether to use sampling or greedy decoding.
temperature (float): The temperature for sampling.
top_k (int): The number of highest probability tokens to keep for top-k sampling.
top_p (float): The cumulative probability for nucleus sampling.
device (str): The device to use.
Returns:
str: The generated text.
"""
# Get the module from the exported program
exported_module = exported_program.module()
# Tokenize the prompt
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
# Initialize with the prompt
generated_ids = input_ids.clone()
# Process the prompt tokens first
curr_position = 0
for i in range(input_ids.shape[1]):
# Process one token at a time
curr_input_ids = input_ids[:, i : i + 1]
curr_cache_position = torch.tensor([curr_position], dtype=torch.long, device=device)
# Forward pass
_ = exported_module(input_ids=curr_input_ids, cache_position=curr_cache_position)
curr_position += 1
# Generate new tokens
for _ in range(max_new_tokens):
# Get the last token as input
curr_input_ids = generated_ids[:, -1:]
curr_cache_position = torch.tensor([curr_position], dtype=torch.long, device=device)
# Forward pass to get next token logits
outputs = exported_module(input_ids=curr_input_ids, cache_position=curr_cache_position)
# Get the next token ID
if do_sample:
# Apply temperature
if temperature > 0:
logits = outputs / temperature
else:
logits = outputs
# Apply top-k filtering
if top_k > 0:
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = float("-inf")
# Apply top-p (nucleus) filtering
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(-1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = float("-inf")
# Sample from the filtered distribution
probs = torch.softmax(logits, dim=-1)
next_token_id = torch.multinomial(probs, num_samples=1)
else:
# Greedy decoding
next_token_id = outputs.argmax(dim=-1, keepdim=True)
# Ensure next_token_id has the right shape before concatenation
if next_token_id.dim() > 2:
next_token_id = next_token_id.squeeze(-1)
# Append to the generated sequence
generated_ids = torch.cat([generated_ids, next_token_id], dim=-1)
curr_position += 1
# Stop if we generate an EOS token
if next_token_id.item() == tokenizer.eos_token_id:
break
# Decode the generated text
return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
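# Editor's sketch (not part of the original file): an assumed end-to-end flow for the decoder-only
# wrapper; the checkpoint id and cache sizes are illustrative, not taken from this module.
#
#     tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M")
#     model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM2-135M")
#     model.generation_config = GenerationConfig(
#         use_cache=True,
#         cache_implementation="static",
#         cache_config={"batch_size": 1, "max_cache_len": 256},
#     )
#     wrapper = TorchExportableModuleForDecoderOnlyLM(model)
#     exported = wrapper.export(
#         input_ids=torch.tensor([[1, 2, 3]], dtype=torch.long),
#         cache_position=torch.arange(3, dtype=torch.long),
#     )
#     text = TorchExportableModuleForDecoderOnlyLM.generate(exported, tokenizer, "Hello", max_new_tokens=10)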
class TorchExportableModuleWithStaticCache(torch.nn.Module):
"""
A recipe module designed to make a `PreTrainedModel` exportable with `torch.export`,
    specifically for decoder-only LMs using `StaticCache`. This module ensures that the
exported model is compatible with further lowering and execution in `ExecuTorch`.
Note:
This class is specifically designed to support export process using `torch.export`
in a way that ensures the model can be further lowered and run efficiently in `ExecuTorch`.
"""
def __init__(
self,
model: PreTrainedModel,
batch_size: int | None = None,
max_cache_len: int | None = None,
device: torch.device | None = None,
) -> None:
"""
Initializes the wrapper module with the pretrained model.
Args:
model (`PreTrainedModel`): The pretrained model to wrap. The model must have caching
enabled and use a 'static' caching implementation.
batch_size (`Optional[int]`): The batch size of the model. If not provided, we check if a value can be found
in `generation_config.cache_config` and otherwise we raise a ValueError.
max_cache_len (`Optional[int]`): The maximum cache length for generation. Same mechanism as `batch_size` if
not provided.
device (`Optional[torch.device]`): The device to use. If not provided, we check if a value can be found
in `generation_config.cache_config` and otherwise we use `model.device` (no error is raised).
Raises:
AssertionError: If the pretrained model does not have caching enabled or if it does
not use a 'static' caching implementation in `model.generation_config`.
ValueError: If `batch_size` or `max_cache_len` is not provided, either as an argument or in `cache_config`.
"""
super().__init__()
config = model.config.get_text_config()
generation_config = model.generation_config
# Sanity checks
if generation_config is None:
raise AssertionError(
"The model must have a generation config to be exported with static caching. "
"Please set `generation_config` in `model`."
)
if not generation_config.use_cache:
raise AssertionError(
"The model must have caching enabled to be exported with static caching. "
"Please set `generation_config.use_cache=True`."
)
if generation_config.cache_implementation != "static":
raise AssertionError(
"The model must use a 'static' caching implementation to be exported with static caching. "
"Please set `generation_config.cache_implementation='static'`."
)
cache_config = {} if generation_config.cache_config is None else generation_config.cache_config
# Ensure batch_size and max_cache_len are set
if batch_size is None:
batch_size = cache_config.get("batch_size", None)
if batch_size is None:
raise ValueError("batch_size must be provided, either as an argument or in cache_config.")
if max_cache_len is None:
max_cache_len = cache_config.get("max_cache_len", None)
if max_cache_len is None:
raise ValueError("max_cache_len must be provided, either as an argument or in cache_config.")
# Infer device if not provided
if device is None:
device = cache_config.get("device", model.device)
# Initialize the static cache
self.model = model
self.static_cache = StaticCache(max_cache_len=max_cache_len, config=config)
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
dtype = self.model.dtype
# We need this call to initialize all the layers (otherwise it's done lazily, which is not exportable)
self.static_cache.early_initialization(batch_size, num_heads, head_dim, dtype, device)
for i in range(len(self.static_cache)):
self.register_buffer(f"key_cache_{i}", self.static_cache.layers[i].keys, persistent=False)
self.register_buffer(f"value_cache_{i}", self.static_cache.layers[i].values, persistent=False)
def forward(
self,
input_ids: torch.LongTensor | None = None,
inputs_embeds: torch.Tensor | None = None,
cache_position: torch.Tensor | None = None,
):
"""
Forward pass of the module, which is compatible with the ExecuTorch runtime.
Args:
input_ids (`torch.Tensor`): Tensor representing current input token id to the module.
inputs_embeds (`torch.Tensor`): Tensor representing current input embeddings to the module.
cache_position (`torch.Tensor`): Tensor representing current input position in the cache.
Returns:
torch.Tensor: Logits output from the model.
This forward adapter serves two primary purposes:
1. **Making the Model `torch.export`-Compatible**:
The adapter hides unsupported objects, such as the `Cache`, from the graph inputs and outputs,
enabling the model to be exportable using `torch.export` without encountering issues.
2. **Ensuring Compatibility with `ExecuTorch` runtime**:
The adapter matches the model's forward signature with that in `executorch/extension/llm/runner`,
ensuring that the exported model can be executed in `ExecuTorch` out-of-the-box.
"""
past_key_values = self.static_cache
outs = self.model(
input_ids=input_ids,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
attention_mask=None,
past_key_values=past_key_values,
use_cache=True,
)
if hasattr(outs, "logits"):
# Returned outputs is `CausalLMOutputWithPast`
return outs.logits
else:
# Returned the `last_hidden_state` from `BaseModelOutputWithPast`
return outs.last_hidden_state
@staticmethod
def generate(
exported_program: torch.export.ExportedProgram,
prompt_token_ids: torch.Tensor,
max_new_tokens: int,
) -> torch.Tensor:
"""
Generate a sequence of tokens using an exported program.
This util function is designed to test exported models by simulating the generation process.
It processes the input prompt tokens sequentially (no parallel prefill).
        This generate function is not intended to replace the original `generate` method; support for
        leveraging the original `generate` may be added in the future.
Args:
exported_program (`torch.export.ExportedProgram`): The exported program generated via `torch.export`.
prompt_token_ids (`torch.Tensor`): Tensor representing the input prompt token IDs.
max_new_tokens (`int`): Maximum number of new tokens to generate. Note that the total generation
length is limited by both `max_new_tokens` and the model's cache size.
Returns:
torch.Tensor: A tensor containing the generated sequence of token IDs, including the original prompt tokens.
"""
device = prompt_token_ids.device
prompt_token_len = prompt_token_ids.shape[-1]
max_generation_length = prompt_token_len + max_new_tokens
for buffer_name, buffer in exported_program.named_buffers():
if buffer_name.startswith("key_cache"):
max_cache_len = buffer.shape[2]
max_generation_length = min(max_generation_length, max_cache_len)
break
response_tokens = []
for input_pos in range(min(max_generation_length, prompt_token_len)):
result = exported_program.module().forward(
input_ids=prompt_token_ids[:, input_pos : input_pos + 1],
cache_position=torch.tensor([input_pos], dtype=torch.long, device=device),
)
response_tokens.append(prompt_token_ids[0][input_pos].item())
current_token = torch.argmax(result[:, -1, :], dim=-1).item()
response_tokens.append(current_token)
while len(response_tokens) < max_generation_length:
result = exported_program.module().forward(
input_ids=torch.tensor([[current_token]], dtype=torch.long, device=device),
cache_position=torch.tensor([len(response_tokens)], dtype=torch.long, device=device),
)
current_token = torch.argmax(result[:, -1, :], dim=-1).item()
response_tokens.append(current_token)
return torch.tensor([response_tokens], dtype=torch.long, device=device)
class TorchExportableModuleWithHybridCache(torch.nn.Module):
"""
A recipe module designed to make a `PreTrainedModel` exportable with `torch.export`,
    specifically for decoder-only LMs using a hybrid `StaticCache`. This module ensures that the
exported model is compatible with further lowering and execution in `ExecuTorch`.
"""
def __init__(
self,
model: PreTrainedModel,
batch_size: int | None = None,
max_cache_len: int | None = None,
device: torch.device | None = None,
) -> None:
"""
Initializes the exportable module.
Args:
model (`PreTrainedModel`): The pretrained model to wrap.
batch_size (`Optional[int]`): The batch size of the model. If not provided, we check if a value can be found
in `generation_config.cache_config` and otherwise we raise a ValueError.
max_cache_len (`Optional[int]`): The maximum cache length for generation. Same mechanism as `batch_size` if
not provided.
device (`Optional[torch.device]`): The device to use. If not provided, we check if a value can be found
in `generation_config.cache_config` and otherwise we use `model.device` (no error is raised).
Raises:
AssertionError: If the model doesn't have the expected configuration for hybrid StaticCache.
ValueError: If `batch_size` or `max_cache_len` is not provided, either as an argument or in `cache_config`.
"""
super().__init__()
self.model = model
config = model.config.get_text_config()
generation_config = model.generation_config
# Sanity checks
if generation_config is None:
raise AssertionError(
"The model must have a generation config to be exported with static caching. "
"Please set `generation_config` in `model`."
)
if not config.use_cache:
raise AssertionError("Model must have caching enabled.")
cache_config = {} if generation_config.cache_config is None else generation_config.cache_config
# Ensure batch_size and max_cache_len are set
if batch_size is None:
batch_size = cache_config.get("batch_size", None)
if batch_size is None:
raise ValueError("batch_size must be provided, either as an argument or in cache_config.")
if max_cache_len is None:
max_cache_len = cache_config.get("max_cache_len", None)
if max_cache_len is None:
raise ValueError("max_cache_len must be provided, either as an argument or in cache_config.")
# Infer device if not provided
if device is None:
device = cache_config.get("device", model.device)
# Initialize the cache
self.cache = StaticCache(config=config, max_cache_len=max_cache_len)
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
dtype = self.model.dtype
# We need this call to initialize all the layers (otherwise it's done lazily, which is not exportable)
self.cache.early_initialization(batch_size, num_heads, head_dim, dtype, device)
# Register all key and value cache tensors as buffers
for i in range(len(self.cache)):
self.register_buffer(f"key_cache_{i}", self.cache.layers[i].keys, persistent=False)
self.register_buffer(f"value_cache_{i}", self.cache.layers[i].values, persistent=False)
def forward(
self,
input_ids: torch.LongTensor | None = None,
inputs_embeds: torch.Tensor | None = None,
cache_position: torch.Tensor | None = None,
) -> torch.Tensor:
"""
Forward pass of the module, which is compatible with the ExecuTorch llm runner.
Args:
input_ids (`torch.Tensor`): Tensor representing current input token id to the module.
inputs_embeds (`Optional[torch.Tensor]`): Tensor representing current input embeddings to the module.
cache_position (`torch.Tensor`): Tensor representing current input position in the cache.
Returns:
torch.Tensor: Logits output from the model.
"""
# Forward pass with the model
outputs = self.model(
input_ids=input_ids,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
attention_mask=None,
past_key_values=self.cache,
use_cache=True,
)
# Return only the logits to simplify the export
return outputs.logits
def convert_and_export_with_cache(
model: PreTrainedModel,
example_input_ids: torch.Tensor | None = None,
example_cache_position: torch.Tensor | None = None,
dynamic_shapes: dict | None = None,
strict: bool | None = None,
):
"""
Convert a `PreTrainedModel` into an exportable module and export it using `torch.export`,
ensuring the exported model is compatible with `ExecuTorch`.
Args:
model (`PreTrainedModel`): The pretrained model to be exported.
example_input_ids (`Optional[torch.Tensor]`): Example input token id used by `torch.export`.
example_cache_position (`Optional[torch.Tensor]`): Example current cache position used by `torch.export`.
dynamic_shapes(`Optional[dict]`): Dynamic shapes used by `torch.export`.
strict(`Optional[bool]`): Flag to instruct `torch.export` to use `torchdynamo`.
Returns:
Exported program (`torch.export.ExportedProgram`): The exported program generated via `torch.export`.
"""
if not is_torch_greater_or_equal_than_2_3:
raise ImportError("torch >= 2.3 is required.")
import torch.export._trace
with torch.no_grad():
# TODO: The default inputs only work for text models. We need to add support for vision/audio models.
example_input_ids = (
example_input_ids
if example_input_ids is not None
else torch.tensor([[1]], dtype=torch.long, device=model.device)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/mistral.py | src/transformers/integrations/mistral.py |
from tokenizers import Regex, Tokenizer, decoders, pre_tokenizers, processors
from tokenizers.models import BPE
from transformers.convert_slow_tokenizer import bytes_to_unicode
from transformers.tokenization_utils_tokenizers import PreTrainedTokenizerFast
class MistralConverter:
"""
A general tiktoken converter.
"""
def __init__(
self,
vocab=None,
pattern=r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""",
add_prefix_space=False,
additional_special_tokens=None,
**kwargs,
):
self.vocab = vocab
self.pattern = pattern
self.add_prefix_space = add_prefix_space
self.additional_special_tokens = additional_special_tokens
def extract_vocab_merges_from_model(self, vocab: str):
bpe_ranks = vocab
byte_encoder = bytes_to_unicode()
def token_bytes_to_string(b):
return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")])
merges = []
vocab = {}
for idx, (token, rank) in enumerate(bpe_ranks.items()):
if token not in self.additional_special_tokens:
vocab[token_bytes_to_string(token)] = idx
if len(token) == 1:
continue
local = []
for index in range(1, len(token)):
piece_l, piece_r = token[:index], token[index:]
if piece_l in bpe_ranks and piece_r in bpe_ranks and (piece_l + piece_r) in bpe_ranks:
local.append((piece_l, piece_r, rank))
local = sorted(local, key=lambda x: (bpe_ranks[x[0]], bpe_ranks[x[1]]), reverse=False)
merges.extend(local)
else:
vocab[token] = idx
merges = sorted(merges, key=lambda val: val[2], reverse=False)
merges = [(token_bytes_to_string(val[0]), token_bytes_to_string(val[1])) for val in merges]
return vocab, merges
def tokenizer(self):
vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab)
tokenizer = Tokenizer(BPE(vocab_scores, merges, fuse_unk=False))
if hasattr(tokenizer.model, "ignore_merges"):
tokenizer.model.ignore_merges = True
return tokenizer
def converted(self) -> Tokenizer:
tokenizer = self.tokenizer()
tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.Split(Regex(self.pattern), behavior="isolated", invert=False),
pre_tokenizers.ByteLevel(add_prefix_space=self.add_prefix_space, use_regex=False),
]
)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.add_special_tokens(self.additional_special_tokens)
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
return tokenizer
def convert_tekken_tokenizer(tokenizer_file: str):
"""Convert a "tekken" tokenizer to a fast Tokenizer."""
# Tekken format -- need to use the Converter
from mistral_common.tokens.tokenizers.base import SpecialTokens
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
# Load directly using their lib
mistral_tokenizer = MistralTokenizer.from_file(tokenizer_file)
# Extract vocab and special tokens
vocab = mistral_tokenizer.instruct_tokenizer.tokenizer._tekken_token2id_nospecial
sorted_tokens = sorted(mistral_tokenizer.instruct_tokenizer.tokenizer._all_special_tokens, key=lambda x: x["rank"])
all_special = [token["token_str"] for token in sorted_tokens]
specials_tokens = {token: idx for idx, token in enumerate(all_special)}
specials_tokens.update(vocab)
vocab = specials_tokens
# TODO(juliendenize): expose this in mistral-common to avoid accessing private attributes
# and improve maintainability
pattern = mistral_tokenizer.instruct_tokenizer.tokenizer._model._pat_str
# Convert
tokenizer = PreTrainedTokenizerFast(
tokenizer_object=MistralConverter(
vocab=vocab, additional_special_tokens=all_special, pattern=pattern
).converted()
)
# Post-process
tokenizer.add_special_tokens({"additional_special_tokens": all_special})
    MAP_SPECIAL = {
"bos_token": SpecialTokens.bos.value,
"eos_token": SpecialTokens.eos.value,
"pad_token": SpecialTokens.pad.value,
"unk_token": SpecialTokens.unk.value,
}
    for special_key, special_token in MAP_SPECIAL.items():
if special_token in all_special:
tokenizer.add_special_tokens({special_key: special_token})
return tokenizer
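# Editor's sketch (not part of the original file): assuming a local "tekken.json" downloaded from a
# Mistral release, a conversion could look like this; the file path is hypothetical.
#
#     fast_tokenizer = convert_tekken_tokenizer("tekken.json")
#     fast_tokenizer("Hello, world!").input_ids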
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/quanto.py | src/transformers/integrations/quanto.py |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name, should_convert_module
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
logger = logging.get_logger(__name__)
class QuantoQuantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, list[torch.Tensor]],
model: torch.nn.Module | None = None,
full_layer_name: str | None = None,
missing_keys: list[str] | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
_, value = tuple(input_dict.items())[0]
value = value[0]
from ..modeling_utils import _load_parameter_into_model
_load_parameter_into_model(model, full_layer_name, value)
module, _ = get_module_from_name(model, full_layer_name)
# Need to set those to a specific value, otherwise they will remain on meta device ...
module.input_scale = torch.ones(module.input_scale.shape)
module.output_scale = torch.ones(module.output_scale.shape)
# quantize
module.freeze()
module.weight.requires_grad = False
module._is_hf_initialized = True
# need to discard some missing keys we already updated the module in freeze.
module_name = full_layer_name.rsplit(".", 1)[0]
missing_keys.discard(f"{module_name}.weight")
missing_keys.discard(f"{module_name}.input_scale")
missing_keys.discard(f"{module_name}.output_scale")
return {}
def replace_with_quanto_layers(
model,
quantization_config=None,
modules_to_not_convert: list[str] | None = None,
):
"""
Public method that recursively replaces the Linear layers of the given model with Quanto quantized layers.
    Returns the converted model.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`QuantoConfig`, defaults to `None`):
The quantization config object that contains the quantization parameters.
modules_to_not_convert (`list`, *optional*, defaults to `None`):
A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be
converted.
"""
from optimum.quanto import QLayerNorm, QLinear, qfloat8, qint2, qint4, qint8
w_mapping = {"float8": qfloat8, "int8": qint8, "int4": qint4, "int2": qint2}
a_mapping = {None: None, "float8": qfloat8, "int8": qint8}
has_been_replaced = False
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
with torch.device("meta"):
new_module = None
if isinstance(module, nn.Linear):
new_module = QLinear(
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
dtype=module.weight.dtype,
weights=w_mapping[quantization_config.weights],
activations=a_mapping[quantization_config.activations],
)
elif isinstance(module, torch.nn.LayerNorm) and quantization_config.activations is not None:
new_module = QLayerNorm(
module.normalized_shape,
module.eps,
module.elementwise_affine,
module.bias is not None,
activations=a_mapping[quantization_config.activations],
)
if new_module is not None:
has_been_replaced = True
model.set_submodule(module_name, new_module)
if not has_been_replaced:
logger.warning(
"You are loading your model using quanto but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
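# Editor's sketch (not part of the original file): this helper is normally invoked by the quanto
# quantizer, but a hedged manual call could look like this; the config values are illustrative.
#
#     from transformers import QuantoConfig
#     config = QuantoConfig(weights="int8")
#     model = replace_with_quanto_layers(model, quantization_config=config, modules_to_not_convert=["lm_head"])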
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/finegrained_fp8.py | src/transformers/integrations/finegrained_fp8.py |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import should_convert_module
from ..utils import is_torch_accelerator_available, is_torch_available, logging
if is_torch_available():
import torch
import torch.nn as nn
import triton
import triton.language as tl
from torch.nn import functional as F
logger = logging.get_logger(__name__)
try:
_FP8_DTYPE = torch.float8_e4m3fn
_FP8_MIN = torch.finfo(_FP8_DTYPE).min
_FP8_MAX = torch.finfo(_FP8_DTYPE).max
except AttributeError:
_FP8_DTYPE = None
_FP8_MIN, _FP8_MAX = -448, 448
logger.warning_once("torch.float8_e4m3fn not available")
# Copied from https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/main/inference/kernel.py
@triton.jit
def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
x = tl.load(x_ptr + offs).to(tl.float32)
s = tl.max(tl.abs(x)) / 448.0
y = x / s
y = y.to(y_ptr.dtype.element_ty)
tl.store(y_ptr + offs, y)
tl.store(s_ptr + pid, s)
def act_quant(x: torch.Tensor, block_size: int = 128) -> tuple[torch.Tensor, torch.Tensor]:
assert x.is_contiguous()
assert x.shape[-1] % block_size == 0
y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)
def grid(meta):
return (triton.cdiv(x.numel(), meta["BLOCK_SIZE"]),)
act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size)
return y, s
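# Editor's note (sketch, not in the original file): `y` keeps the shape and layout of `x` in
# float8_e4m3fn and `s` stores one float32 scale per contiguous block of `block_size` elements,
# so a rough dequantization for sanity-checking could be:
#
#     y, s = act_quant(x, block_size=128)
#     x_approx = (y.float().reshape(-1, 128) * s.reshape(-1, 1)).reshape(x.shape)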
# Adapted from https://github.com/sgl-project/sglang/blob/main/python/sglang/srt/layers/quantization/fp8_kernel.py
@triton.jit
def _w8a8_block_fp8_matmul(
# Pointers to inputs and output
A,
B,
C,
As,
Bs,
# Shape for matmul
M,
N,
K,
# Block size for block-wise quantization
group_n,
group_k,
# Stride for inputs and output
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
stride_As_m,
stride_As_k,
stride_Bs_k,
stride_Bs_n,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
"""Triton-accelerated function used to perform linear operations (dot
product) on input tensors `A` and `B` with block-wise quantization, and
store the result in output tensor `C`.
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = A + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = B + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
As_ptrs = As + offs_am * stride_As_m
offs_bsn = offs_bn // group_n
Bs_ptrs = Bs + offs_bsn * stride_Bs_n
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
k_start = k * BLOCK_SIZE_K
offs_ks = k_start // group_k
a_s = tl.load(As_ptrs + offs_ks * stride_As_k)
b_s = tl.load(Bs_ptrs + offs_ks * stride_Bs_k)
accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if C.dtype.element_ty == tl.bfloat16:
c = accumulator.to(tl.bfloat16)
elif C.dtype.element_ty == tl.float16:
c = accumulator.to(tl.float16)
else:
c = accumulator.to(tl.float32)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = C + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
@triton.jit
def _w8a8_block_fp8_matmul_per_tensor(
# Pointers to inputs and output
A,
B,
C,
As,
Bs,
# Shape for matmul
M,
N,
K,
# Block size for block-wise quantization
group_n,
group_k,
# Stride for inputs and output
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
"""Triton-accelerated function used to perform linear operations (dot
product) on input tensors `A` and `B` with per-tensor quantization, and
store the result in output tensor `C`.
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = A + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = B + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
scale_a = tl.load(As)
scale_b = tl.load(Bs)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
accumulator += tl.dot(a, b) * scale_a * scale_b
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if C.dtype.element_ty == tl.bfloat16:
c = accumulator.to(tl.bfloat16)
elif C.dtype.element_ty == tl.float16:
c = accumulator.to(tl.float16)
else:
c = accumulator.to(tl.float32)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = C + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
def w8a8_block_fp8_matmul_triton(
A: torch.Tensor,
B: torch.Tensor,
As: torch.Tensor,
Bs: torch.Tensor,
block_size: list[int],
output_dtype: torch.dtype = torch.float32,
) -> torch.Tensor:
"""This function performs matrix multiplication with block-wise
quantization.
It takes two input tensors `A` and `B` with scales `As` and `Bs`.
The output is returned in the specified `output_dtype`.
Args:
A: The input tensor, e.g., activation.
B: The input tensor, e.g., weight.
As: The per-token-group quantization scale for `A`.
Bs: The per-block quantization scale for `B`.
block_size: The block size for per-block quantization. It should
be 2-dim, e.g., [128, 128].
        output_dtype: The dtype of the returned tensor.
Returns:
torch.Tensor: The result of matmul.
"""
if block_size is None:
block_n, block_k = 128, 128
else:
assert len(block_size) == 2
block_n, block_k = block_size[0], block_size[1]
    # if we have per-tensor quantization, we use a 128x128 block size for the tiled matmul
if block_n == B.shape[-2] and block_k == B.shape[-1]:
block_n = 128
block_k = 128
assert A.shape[-1] == B.shape[-1]
if As.numel() != 1:
assert A.shape[:-1] == As.shape[:-1] and A.is_contiguous()
assert triton.cdiv(A.shape[-1], block_k) == As.shape[-1]
M = A.numel() // A.shape[-1]
N, K = B.shape
assert B.ndim == 2 and B.is_contiguous()
if Bs.numel() != 1:
assert Bs.ndim == 2
assert triton.cdiv(N, block_n) == Bs.shape[0], f"{N}, {block_n}, {Bs.shape}"
assert triton.cdiv(K, block_k) == Bs.shape[1], f"{K}, {block_k}, {Bs.shape}"
C_shape = A.shape[:-1] + (N,)
C = A.new_empty(C_shape, dtype=output_dtype)
BLOCK_SIZE_M = 128
if M < BLOCK_SIZE_M:
BLOCK_SIZE_M = triton.next_power_of_2(M)
BLOCK_SIZE_M = max(BLOCK_SIZE_M, 16)
BLOCK_SIZE_K = block_k
assert block_k % BLOCK_SIZE_K == 0
BLOCK_SIZE_N = block_n
def grid(META):
return (triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),)
if As.numel() == 1 and Bs.numel() == 1:
_w8a8_block_fp8_matmul_per_tensor[grid](
A,
B,
C,
As,
Bs,
M,
N,
K,
block_n,
block_k,
A.stride(-2),
A.stride(-1),
B.stride(1),
B.stride(0),
C.stride(-2),
C.stride(-1),
BLOCK_SIZE_M=BLOCK_SIZE_M,
BLOCK_SIZE_N=BLOCK_SIZE_N,
BLOCK_SIZE_K=BLOCK_SIZE_K,
GROUP_SIZE_M=8,
)
else:
_w8a8_block_fp8_matmul[grid](
A,
B,
C,
As,
Bs,
M,
N,
K,
block_n,
block_k,
A.stride(-2),
A.stride(-1),
B.stride(1),
B.stride(0),
C.stride(-2),
C.stride(-1),
As.stride(-2),
As.stride(-1),
Bs.stride(1),
Bs.stride(0),
BLOCK_SIZE_M=BLOCK_SIZE_M,
BLOCK_SIZE_N=BLOCK_SIZE_N,
BLOCK_SIZE_K=BLOCK_SIZE_K,
GROUP_SIZE_M=8,
)
return C
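# Editor's note (sketch, not in the original file): expected shapes for the block-wise path, as
# implied by the assertions above; the sizes and dtypes are illustrative.
#
#     A:  (..., M, K)  float8, per-token-group quantized    As: (..., M, K // block_k)  float32
#     B:  (N, K)       float8, per-block quantized          Bs: (N // block_n, K // block_k)  float32
#     C = w8a8_block_fp8_matmul_triton(A, B, As, Bs, block_size=[128, 128], output_dtype=torch.bfloat16)
#     # C has shape (..., M, N) and dtype `output_dtype`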
# Python version of the above triton function; it's much slower than the triton version and is intended for testing
@torch.compile
def w8a8_block_fp8_matmul_compile(
input_q: torch.Tensor, # [batch, seq_len, hidden_dim]
weight_q: torch.Tensor, # [out_features, hidden_dim]
input_scale: torch.Tensor, # [batch * seq_len, num_input_groups]
weight_scale: torch.Tensor, # [num_weight_blocks_m, num_weight_blocks_n]
block_size: tuple[int, int] | None = None, # (M=128, N=128) for weights for example
output_dtype: torch.dtype = torch.float32,
) -> torch.Tensor:
"""
Performs blocked matrix multiplication with FP8 quantized matrices.
Args:
input_q: Quantized input tensor with 1x128 block quantization
weight_q: Quantized weight tensor with 128x128 block quantization
input_scale: Scaling factors for input blocks
weight_scale: Scaling factors for weight blocks
block_size: Tuple of (M, N) for weight block dimensions
output_dtype: Desired output dtype
"""
batch_size, seq_len, hidden_dim = input_q.shape if input_q.ndim == 3 else (1, input_q.shape[0], input_q.shape[1])
out_features = weight_q.shape[0]
# Reshape input for batched matmul
input_reshaped = input_q.view(-1, hidden_dim) # [batch*seq_len, hidden_dim]
input_scale_reshaped = input_scale.view(input_scale.shape[0], -1) # [batch*seq_len, 1]
# Calculate number of blocks
num_weight_blocks_m = out_features // block_size[0]
num_weight_blocks_n = hidden_dim // block_size[1]
output = torch.zeros((batch_size * seq_len, out_features), dtype=torch.float32, device=input_q.device)
for i in range(num_weight_blocks_m):
m_start = i * block_size[0]
m_end = m_start + block_size[0]
for j in range(num_weight_blocks_n):
n_start = j * block_size[1]
n_end = n_start + block_size[1]
# Extract current blocks
input_block = input_reshaped[:, n_start:n_end]
weight_block = weight_q[m_start:m_end, n_start:n_end]
# Get corresponding scales
curr_input_scale = input_scale_reshaped[:, j : j + 1] # [batch*seq_len, 1]
curr_weight_scale = weight_scale[i, j] # scalar
block_result = (
torch._scaled_mm(
input_block,
weight_block.t(),
scale_a=torch.tensor(1, dtype=torch.float32, device=input_q.device),
scale_b=curr_weight_scale,
out_dtype=output_dtype,
)
* curr_input_scale
)
output[:, m_start:m_end] += block_result
output = output.view(batch_size, seq_len, out_features)
return output.to(output_dtype)
class FP8Linear(nn.Linear):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = False,
dtype=torch.float8_e4m3fn,
block_size: tuple[int, int] | None = None,
activation_scheme="dynamic",
):
super().__init__(in_features, out_features)
# If block size is None, it means that we are doing per-tensor quantization
self.block_size = block_size
self.activation_scheme = activation_scheme
self.weight = torch.nn.Parameter(torch.empty(out_features, in_features, dtype=dtype))
if self.block_size is None:
self.weight_scale_inv = nn.Parameter(torch.tensor(1.0, dtype=torch.float32))
else:
scale_out_features = (out_features + self.block_size[0] - 1) // self.block_size[0]
scale_in_features = (in_features + self.block_size[1] - 1) // self.block_size[1]
self.weight_scale_inv = nn.Parameter(
torch.empty(scale_out_features, scale_in_features, dtype=torch.float32)
)
if self.activation_scheme == "static":
self.activation_scale = nn.Parameter(torch.tensor(1.0, dtype=torch.float32))
if bias:
self.bias = nn.Parameter(torch.empty(self.out_features))
else:
self.register_parameter("bias", None)
def forward(self, input: torch.Tensor) -> torch.Tensor:
if self.weight.element_size() > 1:
return F.linear(input, self.weight, self.bias)
else:
if isinstance(self.weight, torch.distributed.tensor.DTensor):
weight = self.weight._local_tensor.contiguous()
scale_inv = self.weight_scale_inv._local_tensor.contiguous()
else:
weight = self.weight.contiguous()
scale_inv = self.weight_scale_inv.contiguous()
# Context manager used to switch among the available accelerators
device_type = torch.accelerator.current_accelerator().type if is_torch_accelerator_available() else "cuda"
torch_accelerator_module = getattr(torch, device_type, torch.cuda)
with torch_accelerator_module.device(input.device):
if self.activation_scheme == "dynamic":
qinput, scale = act_quant(input, self.block_size[1])
elif self.activation_scheme == "static":
scale = self.activation_scale.to(torch.float32)
qinput = (input / scale).clamp(min=_FP8_MIN, max=_FP8_MAX).to(torch.float8_e4m3fn)
else:
raise NotImplementedError("Not supported")
output = w8a8_block_fp8_matmul_triton(
qinput,
weight,
scale,
scale_inv,
self.block_size,
output_dtype=input.dtype,
)
# Blocks the CPU until all accelerator operations on the specified device are complete. It is used to ensure that the results of the
# preceding operations are ready before proceeding
torch_accelerator_module.synchronize()
if self.bias is not None:
output = output + self.bias
# output = torch.nan_to_num(output, nan=0.0)
return output.to(dtype=input.dtype)
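# Editor's note (sketch, not in the original file): rough shape expectations for a block-quantized
# layer with block_size=(128, 128); the feature sizes are illustrative.
#
#     layer = FP8Linear(512, 256, block_size=(128, 128))
#     layer.weight.shape            # torch.Size([256, 512]), dtype torch.float8_e4m3fn
#     layer.weight_scale_inv.shape  # torch.Size([2, 4]) == (ceil(256 / 128), ceil(512 / 128))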
def _ceil_div(a, b):
return (a + b - 1) // b
class FP8Expert(nn.Module):
def __init__(self, config, block_size, dtype=torch.float8_e4m3fn):
super().__init__()
from ..activations import ACT2FN
self.block_size = block_size
self.num_experts = config.num_local_experts
self.hidden_dim = config.hidden_size
self.intermediate_dim = config.intermediate_size
Wg_out, Wg_in = 2 * self.intermediate_dim, self.hidden_dim
Wd_out, Wd_in = self.hidden_dim, self.intermediate_dim
self.gate_up_proj = nn.Parameter(torch.zeros(self.num_experts, Wg_out, Wg_in, dtype=dtype))
self.down_proj = nn.Parameter(torch.zeros(self.num_experts, Wd_out, Wd_in, dtype=dtype))
bo, bi = self.block_size
# gate_up tiles: ceil(Wg_out/bo) x ceil(Wg_in/bi)
gu_scale_o = _ceil_div(Wg_out, bo)
gu_scale_i = _ceil_div(Wg_in, bi)
self.gate_up_proj_scale_inv = nn.Parameter(
torch.zeros(self.num_experts, gu_scale_o, gu_scale_i, dtype=torch.float32)
)
# down tiles: ceil(Wd_out/bo) x ceil(Wd_in/bi)
dp_scale_o = _ceil_div(Wd_out, bo)
dp_scale_i = _ceil_div(Wd_in, bi)
self.down_proj_scale_inv = nn.Parameter(
torch.zeros(self.num_experts, dp_scale_o, dp_scale_i, dtype=torch.float32)
)
# (Optional) bias per projection — many MoEs omit bias; keep None to match your FP8Linear default
self.register_parameter("gate_up_bias", None)
self.register_parameter("down_bias", None)
# Activation used in the MLP (same as your config / ACT2FN)
# Keep a handle here; actual usage happens in forward of your MoE block
self.act_fn = ACT2FN[config.hidden_act]
def forward(
self,
hidden_states: torch.Tensor,
top_k_index: torch.Tensor,
top_k_weights: torch.Tensor,
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
num_experts = top_k_weights.shape[1]
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=num_experts + 1)
expert_mask = expert_mask.permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_idx = expert_idx[0]
if expert_idx == num_experts:
continue
_, token_idx = torch.where(expert_mask[expert_idx])
current_state = hidden_states.index_select(0, token_idx)
gate, up = self.linear(
current_state, self.gate_up_proj[expert_idx], self.gate_up_proj_scale_inv[expert_idx]
).chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = self.linear(
current_hidden_states, self.down_proj[expert_idx], self.down_proj_scale_inv[expert_idx]
)
routing_weights = top_k_weights[token_idx, expert_idx].unsqueeze(-1)
current_hidden_states = current_hidden_states * routing_weights.to(current_hidden_states.dtype)
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
return final_hidden_states
def linear(self, input: torch.Tensor, weight: torch.Tensor, weight_scale_inv: torch.Tensor) -> torch.Tensor:
if weight.element_size() > 1:
return F.linear(input, weight, None)
else:
# Context manager used to switch among the available accelerators
device_type = torch.accelerator.current_accelerator().type if is_torch_accelerator_available() else "cuda"
torch_accelerator_module = getattr(torch, device_type, torch.cuda)
with torch_accelerator_module.device(input.device):
qinput, scale = act_quant(input, self.block_size[1])
output = w8a8_block_fp8_matmul_triton(
qinput,
weight,
scale,
weight_scale_inv,
self.block_size,
output_dtype=input.dtype,
)
# Blocks the CPU until all accelerator operations on the specified device are complete. It is used to ensure that the results of the
# preceding operations are ready before proceeding
torch_accelerator_module.synchronize()
return output.to(dtype=input.dtype)
def replace_with_fp8_linear(
model, modules_to_not_convert: list[str] | None = None, quantization_config=None, pre_quantized=False
):
"""
A helper function to replace all `torch.nn.Linear` modules by `FP8Linear` modules.
Parameters:
model (`torch.nn.Module`):
Input model or `torch.nn.Module` as the function is run recursively.
        modules_to_not_convert (`list[str]`, *optional*, defaults to `None`):
Names of the modules to not convert. In practice we keep the `lm_head` in full precision for numerical stability reasons.
        quantization_config (`FineGrainedFP8Config`):
The quantization config object that contains the quantization parameters.
        pre_quantized (`bool`, defaults to `False`):
Whether the model is pre-quantized or not
"""
if quantization_config.dequantize:
return model
has_been_replaced = False
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
# we need this to correctly materialize the weights during quantization
module_kwargs = {} if pre_quantized else {"dtype": None}
new_module = None
with torch.device("meta"):
if module_name.endswith(".experts"):
new_module = FP8Expert(
config=model.config, block_size=quantization_config.weight_block_size, **module_kwargs
)
elif isinstance(module, nn.Linear):
new_module = FP8Linear(
in_features=module.in_features,
out_features=module.out_features,
bias=module.bias is not None,
activation_scheme=quantization_config.activation_scheme,
block_size=quantization_config.weight_block_size,
**module_kwargs,
)
if new_module is not None:
model.set_submodule(module_name, new_module)
has_been_replaced = True
if not has_been_replaced:
logger.warning(
"You are loading your model using fp8 but no linear modules were found in your model."
" Please double check your model architecture."
)
return model
class Fp8Quantize(ConversionOps):
"""
    A quantization operation that creates two tensors, a quantized weight and a scale, out of a single weight.
"""
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(self, input_dict: torch.Tensor, **kwargs) -> dict[str, torch.Tensor]:
# Unpack single key/value (value may be wrapped in a list)
target_keys, value = tuple(input_dict.items())[0]
value = value[0]
# Resolve block size (support dict-like or attr-like quant_config)
block_size = None
if self.hf_quantizer.quantization_config is not None:
if isinstance(self.hf_quantizer.quantization_config, dict):
block_size = self.hf_quantizer.quantization_config.get("weight_block_size")
else:
block_size = getattr(self.hf_quantizer.quantization_config, "weight_block_size", None)
if block_size is None:
block_size = (value.shape[-2], value.shape[-1])
block_m, block_n = block_size
rows, cols = value.shape[-2], value.shape[-1]
# Enforce exact tiling like your original
if rows % block_m != 0 or cols % block_n != 0:
raise ValueError(
f"Matrix dimensions ({rows}, {cols}) must be divisible by block sizes ({block_m}, {block_n}). for {target_keys}"
)
# Leading dims can be empty (2D) or include num_experts/... (3D+)
leading_shape = value.shape[:-2]
rows_tiles = rows // block_m
cols_tiles = cols // block_n
original_shape = value.shape
value_fp32 = value.to(torch.float32)
# Reshape to (..., rows_tiles, block_m, cols_tiles, block_n)
reshaped = value_fp32.reshape(*leading_shape, rows_tiles, block_m, cols_tiles, block_n)
# Per-tile max-abs over the block dims
# dims: block_m is at -3, block_n is at -1 after the reshape
max_abs = reshaped.abs().amax(dim=(-3, -1))
safe_max_abs = torch.where(max_abs > 0, max_abs, torch.ones_like(max_abs))
# Tile scale (we store inverse scale like your Linear: weight_scale_inv)
scales = _FP8_MAX / safe_max_abs
scales = torch.where(max_abs > 0, scales, torch.ones_like(scales)) # keep zeros stable
# Broadcast scales back over the block dims and quantize
# max_abs/scales shape: (..., rows_tiles, cols_tiles)
scales_broadcast = scales.unsqueeze(-1).unsqueeze(-3) # -> (..., rows_tiles, 1, cols_tiles, 1)
scaled = reshaped * scales_broadcast
quantized = torch.clamp(scaled, min=_FP8_MIN, max=_FP8_MAX).to(_FP8_DTYPE)
quantized = quantized.reshape(original_shape)
inv_scales = (1.0 / scales).to(torch.float32) # shape: (*leading, rows_tiles, cols_tiles)
if target_keys.endswith("weight"):
scale_key = target_keys.rsplit(".", 1)[0] + ".weight_scale_inv"
else:
scale_key = target_keys + "_scale_inv"
# Return both quantized weights and per-tile inverse scales (keeps leading dims, e.g., num_experts)
return {
target_keys: quantized,
scale_key: inv_scales,
}
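# Editor's note (sketch, not in the original file): for a 2D weight with weight_block_size=(128, 128),
# the op above effectively computes, per (128, 128) tile:
#
#     scale     = _FP8_MAX / tile.abs().amax()                               # per-tile scale
#     quantized = (tile * scale).clamp(_FP8_MIN, _FP8_MAX).to(_FP8_DTYPE)    # stored under the weight key
#     scale_inv = 1.0 / scale                                                # stored as "<prefix>.weight_scale_inv"
#
# so broadcasting `quantized * scale_inv` per tile approximately reconstructs the original weight.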
class Fp8Dequantize(ConversionOps):
"""Inverse operation of :class:`Fp8Quantize`. Takes a pair (weight, scale) and reconstructs the fp32 tensor."""
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
full_layer_name: str | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
if len(input_dict) < 2:
# case where we only got weights, need to check for "weight$"
return {full_layer_name: input_dict["weight$"]}
quantized = input_dict["weight$"][0]
scales = input_dict["weight_scale_inv"][0]
rows, cols = quantized.shape[-2:]
block_size = self.hf_quantizer.quantization_config.weight_block_size
if block_size is None:
block_size = (quantized.shape[-2], quantized.shape[-1])
block_m, block_n = block_size
if rows % block_m != 0 or cols % block_n != 0:
raise ValueError(
f"Matrix dimensions ({rows}, {cols}) must be divisible by block sizes ({block_m}, {block_n})."
)
quantized = quantized.to(scales.dtype)
reshaped = quantized.reshape(-1, rows // block_m, block_m, cols // block_n, block_n)
expanded_scales = scales.reshape(-1, rows // block_m, cols // block_n)
expanded_scales = expanded_scales.unsqueeze(-1).unsqueeze(2)
dequantized = reshaped * expanded_scales
return {
full_layer_name: dequantized.reshape(quantized.shape),
}
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/integration_utils.py | src/transformers/integrations/integration_utils.py |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integrations with other Python libraries.
"""
import copy
import functools
import importlib.metadata
import importlib.util
import json
import numbers
import os
import re
import shutil
import sys
import tempfile
import warnings
from dataclasses import fields
from enum import Enum
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal
import numpy as np
import packaging.version
from transformers.utils.import_utils import _is_package_available
if os.getenv("WANDB_MODE") == "offline":
print("[INFO] Running in WANDB offline mode")
from .. import PreTrainedModel, TrainingArguments
from .. import __version__ as version
from ..utils import (
PushToHubMixin,
flatten_dict,
is_datasets_available,
is_pandas_available,
is_torch_available,
logging,
)
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
import torch.distributed as dist
# comet_ml requires to be imported before any ML frameworks
_MIN_COMET_VERSION = "3.43.2"
try:
_comet_version = importlib.metadata.version("comet_ml")
_is_comet_installed = True
_is_comet_recent_enough = packaging.version.parse(_comet_version) >= packaging.version.parse(_MIN_COMET_VERSION)
# Check if the Comet API Key is set
import comet_ml
if comet_ml.config.get_config("comet.api_key") is not None:
_is_comet_configured = True
else:
_is_comet_configured = False
except (importlib.metadata.PackageNotFoundError, ImportError, ValueError, TypeError, AttributeError, KeyError):
_comet_version = None
_is_comet_installed = False
_is_comet_recent_enough = False
_is_comet_configured = False
_has_neptune = (
importlib.util.find_spec("neptune") is not None or importlib.util.find_spec("neptune-client") is not None
)
if TYPE_CHECKING and _has_neptune:
try:
_neptune_version = importlib.metadata.version("neptune")
logger.info(f"Neptune version {_neptune_version} available.")
except importlib.metadata.PackageNotFoundError:
try:
_neptune_version = importlib.metadata.version("neptune-client")
logger.info(f"Neptune-client version {_neptune_version} available.")
except importlib.metadata.PackageNotFoundError:
_has_neptune = False
from .. import modelcard # noqa: E402
from ..trainer_callback import ProgressCallback, TrainerCallback # noqa: E402
from ..trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy # noqa: E402
from ..training_args import ParallelMode # noqa: E402
from ..utils import ENV_VARS_TRUE_VALUES, is_torch_xla_available # noqa: E402
# Integration functions:
def is_wandb_available():
if importlib.util.find_spec("wandb") is not None:
import wandb
# wandb might still be detected by find_spec after an uninstall (leftover files or metadata), but not actually
# import correctly. To confirm it's fully installed and usable, we check for a key attribute like "run".
return hasattr(wandb, "run")
else:
return False
def is_trackio_available():
return importlib.util.find_spec("trackio") is not None
def is_clearml_available():
return importlib.util.find_spec("clearml") is not None
def is_comet_available():
if _is_comet_installed is False:
return False
if _is_comet_recent_enough is False:
logger.warning(
"comet_ml version %s is installed, but version %s or higher is required. "
"Please update comet_ml to the latest version to enable Comet logging with pip install 'comet-ml>=%s'.",
_comet_version,
_MIN_COMET_VERSION,
_MIN_COMET_VERSION,
)
return False
if _is_comet_configured is False:
logger.warning(
"comet_ml is installed but the Comet API Key is not configured. "
"Please set the `COMET_API_KEY` environment variable to enable Comet logging. "
"Check out the documentation for other ways of configuring it: "
"https://www.comet.com/docs/v2/guides/experiment-management/configure-sdk/#set-the-api-key"
)
return False
return True
def is_tensorboard_available():
return importlib.util.find_spec("tensorboard") is not None or importlib.util.find_spec("tensorboardX") is not None
def is_optuna_available():
return importlib.util.find_spec("optuna") is not None
def is_ray_available():
return importlib.util.find_spec("ray") is not None
def is_ray_tune_available():
if not is_ray_available():
return False
return importlib.util.find_spec("ray.tune") is not None
def is_azureml_available():
if importlib.util.find_spec("azureml") is None:
return False
if importlib.util.find_spec("azureml.core") is None:
return False
return importlib.util.find_spec("azureml.core.run") is not None
def is_mlflow_available():
if os.getenv("DISABLE_MLFLOW_INTEGRATION", "FALSE").upper() == "TRUE":
return False
return importlib.util.find_spec("mlflow") is not None
def is_dagshub_available():
return None not in [importlib.util.find_spec("dagshub"), importlib.util.find_spec("mlflow")]
def is_neptune_available():
return _has_neptune
def is_codecarbon_available():
return importlib.util.find_spec("codecarbon") is not None
def is_flytekit_available():
return importlib.util.find_spec("flytekit") is not None
def is_flyte_deck_standard_available():
if not is_flytekit_available():
return False
return importlib.util.find_spec("flytekitplugins.deck") is not None
def is_dvclive_available():
return importlib.util.find_spec("dvclive") is not None
def is_swanlab_available():
return importlib.util.find_spec("swanlab") is not None
def hp_params(trial):
if is_optuna_available():
import optuna
if isinstance(trial, optuna.trial.BaseTrial):
return trial.params
if is_ray_tune_available():
if isinstance(trial, dict):
return trial
if is_wandb_available():
if isinstance(trial, dict):
return trial
raise RuntimeError(f"Unknown type for trial {trial.__class__}")
def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
import optuna
from accelerate.utils.memory import release_memory
if trainer.args.process_index == 0:
def _objective(trial: optuna.Trial, checkpoint_dir=None):
checkpoint = None
if checkpoint_dir:
for subdir in os.listdir(checkpoint_dir):
if subdir.startswith(PREFIX_CHECKPOINT_DIR):
checkpoint = os.path.join(checkpoint_dir, subdir)
trainer.objective = None
if trainer.args.world_size > 1:
if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED:
                    raise RuntimeError("Optuna HPO with DDP is currently only supported for ParallelMode.DISTRIBUTED.")
trainer.hp_space(trial)
fixed_trial = optuna.trial.FixedTrial(trial.params, trial.number)
trial_main_rank_list = [fixed_trial]
torch.distributed.broadcast_object_list(trial_main_rank_list, src=0)
trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
else:
trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
# If there hasn't been any evaluation during the training loop.
if getattr(trainer, "objective", None) is None:
metrics = trainer.evaluate()
trainer.objective = trainer.compute_objective(metrics)
# Free GPU memory
trainer.model_wrapped, trainer.model = release_memory(trainer.model_wrapped, trainer.model)
trainer.accelerator.clear()
return trainer.objective
timeout = kwargs.pop("timeout", None)
n_jobs = kwargs.pop("n_jobs", 1)
gc_after_trial = kwargs.pop("gc_after_trial", False)
catch = kwargs.pop("catch", ())
directions = direction if isinstance(direction, list) else None
direction = None if directions is not None else direction
study = optuna.create_study(direction=direction, directions=directions, **kwargs)
study.optimize(
_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs, gc_after_trial=gc_after_trial, catch=catch
)
if not study._is_multi_objective():
best_trial = study.best_trial
return BestRun(str(best_trial.number), best_trial.value, best_trial.params)
else:
best_trials = study.best_trials
return [BestRun(str(best.number), best.values, best.params) for best in best_trials]
else:
for i in range(n_trials):
trainer.objective = None
trial_main_rank_list = [None]
if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED:
                raise RuntimeError("Optuna HPO with DDP is currently only supported for ParallelMode.DISTRIBUTED.")
torch.distributed.broadcast_object_list(trial_main_rank_list, src=0)
trainer.train(resume_from_checkpoint=None, trial=trial_main_rank_list[0])
# If there hasn't been any evaluation during the training loop.
if getattr(trainer, "objective", None) is None:
metrics = trainer.evaluate()
trainer.objective = trainer.compute_objective(metrics)
return None
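# Editor's note: illustrative usage sketch added for clarity; it is not part of the original
# module. `run_hp_search_optuna` is normally reached through `Trainer.hyperparameter_search`,
# and the search space below (learning rate / batch size) is only an example.
def _optuna_hp_search_example(trainer):
    def example_hp_space(trial):
        return {
            "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
            "per_device_train_batch_size": trial.suggest_categorical(
                "per_device_train_batch_size", [8, 16, 32]
            ),
        }

    return trainer.hyperparameter_search(
        hp_space=example_hp_space, backend="optuna", n_trials=10, direction="minimize"
    )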
def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
"""
Environment:
- **RAY_SCOPE** (`str`, *optional*, defaults to `"last"`):
The scope to use when doing hyperparameter search with Ray. By default, `"last"` will be used. Ray
will then use the last checkpoint of all trials, compare those, and select the best one. However,
other options are also available. See the Ray documentation (https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial)
for more options
"""
import ray
import ray.tune
def _objective(trial: dict, local_trainer):
try:
from transformers.utils.notebook import NotebookProgressCallback
if local_trainer.pop_callback(NotebookProgressCallback):
local_trainer.add_callback(ProgressCallback)
except ModuleNotFoundError:
pass
local_trainer.objective = None
checkpoint = ray.tune.get_checkpoint()
if checkpoint:
# Upon trial resume, the local_trainer's objective gets reset to None.
# If `local_trainer.train` is a noop (training has already reached
# the target number of epochs/steps), then this would
# trigger an unnecessary extra checkpoint at the end of training.
# -> Set the objective to a dummy value upon resume as a workaround.
local_trainer.objective = "objective"
with checkpoint.as_directory() as checkpoint_dir:
checkpoint_path = next(Path(checkpoint_dir).glob(f"{PREFIX_CHECKPOINT_DIR}*")).as_posix()
local_trainer.train(resume_from_checkpoint=checkpoint_path, trial=trial)
else:
local_trainer.train(trial=trial)
# If there hasn't been any evaluation during the training loop.
if getattr(local_trainer, "objective", None) is None:
metrics = local_trainer.evaluate()
local_trainer.objective = local_trainer.compute_objective(metrics)
metrics.update({"objective": local_trainer.objective, "done": True})
with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
local_trainer._tune_save_checkpoint(checkpoint_dir=temp_checkpoint_dir)
checkpoint = ray.tune.Checkpoint.from_directory(temp_checkpoint_dir)
ray.tune.report(metrics, checkpoint=checkpoint)
if not trainer._memory_tracker.skip_memory_metrics:
from ..trainer_utils import TrainerMemoryTracker
logger.warning(
"Memory tracking for your Trainer is currently "
"enabled. Automatically disabling the memory tracker "
"since the memory tracker is not serializable."
)
trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True)
    # The model and the TensorBoard writer do not pickle, so we have to remove them (if they exist)
# while doing the ray hp search.
_tb_writer = trainer.pop_callback(TensorBoardCallback)
trainer.model = None
# Setup default `resources_per_trial`.
if "resources_per_trial" not in kwargs:
# Default to 1 CPU and 1 GPU (if applicable) per trial.
kwargs["resources_per_trial"] = {"cpu": 1}
if trainer.args.n_gpu > 0:
kwargs["resources_per_trial"]["gpu"] = 1
resource_msg = "1 CPU" + (" and 1 GPU" if trainer.args.n_gpu > 0 else "")
logger.info(
"No `resources_per_trial` arg was passed into "
"`hyperparameter_search`. Setting it to a default value "
f"of {resource_msg} for each trial."
)
# Make sure each trainer only uses GPUs that were allocated per trial.
gpus_per_trial = kwargs["resources_per_trial"].get("gpu", 0)
trainer.args._n_gpu = gpus_per_trial
# Setup default `progress_reporter`.
if "progress_reporter" not in kwargs:
from ray.tune import CLIReporter
kwargs["progress_reporter"] = CLIReporter(metric_columns=["objective"])
if "scheduler" in kwargs:
from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining
# Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting.
if isinstance(
kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)
) and (not trainer.args.do_eval or trainer.args.eval_strategy == IntervalStrategy.NO):
raise RuntimeError(
"You are using {cls} as a scheduler but you haven't enabled evaluation during training. "
"This means your trials will not report intermediate results to Ray Tune, and "
"can thus not be stopped early or used to exploit other trials parameters. "
"If this is what you want, do not use {cls}. If you would like to use {cls}, "
"make sure you pass `do_eval=True` and `eval_strategy='steps'` in the "
"Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__)
)
trainable = ray.tune.with_parameters(_objective, local_trainer=trainer)
@functools.wraps(trainable)
def dynamic_modules_import_trainable(*args, **kwargs):
"""
Wrapper around `tune.with_parameters` to ensure datasets_modules are loaded on each Actor.
Without this, an ImportError will be thrown. See https://github.com/huggingface/transformers/issues/11565.
Assumes that `_objective`, defined above, is a function.
"""
if is_datasets_available() and packaging.version.parse(
importlib.metadata.version("datasets")
) < packaging.version.parse("4.0.0"):
import datasets.load
dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), "__init__.py")
# load dynamic_modules from path
spec = importlib.util.spec_from_file_location("datasets_modules", dynamic_modules_path)
datasets_modules = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = datasets_modules
spec.loader.exec_module(datasets_modules)
return trainable(*args, **kwargs)
# special attr set by tune.with_parameters
if hasattr(trainable, "__mixins__"):
dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__
analysis = ray.tune.run(
dynamic_modules_import_trainable,
config=trainer.hp_space(None),
num_samples=n_trials,
**kwargs,
)
ray_scope = os.getenv("RAY_SCOPE", "last")
best_trial = analysis.get_best_trial(metric="objective", mode=direction[:3], scope=ray_scope)
best_run = BestRun(best_trial.trial_id, best_trial.last_result["objective"], best_trial.config, analysis)
if _tb_writer is not None:
trainer.add_callback(_tb_writer)
return best_run
def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
if not is_wandb_available():
raise ImportError("This function needs wandb installed: `pip install wandb`")
import wandb
# add WandbCallback if not already added in trainer callbacks
reporting_to_wandb = False
for callback in trainer.callback_handler.callbacks:
if isinstance(callback, WandbCallback):
reporting_to_wandb = True
break
if not reporting_to_wandb:
trainer.add_callback(WandbCallback())
trainer.args.report_to = ["wandb"]
best_trial = {"run_id": None, "objective": None, "hyperparameters": None}
sweep_id = kwargs.pop("sweep_id", None)
project = kwargs.pop("project", None)
name = kwargs.pop("name", None)
entity = kwargs.pop("entity", None)
metric = kwargs.pop("metric", "eval/loss")
sweep_config = trainer.hp_space(None)
sweep_config["metric"]["goal"] = direction
sweep_config["metric"]["name"] = metric
if name:
sweep_config["name"] = name
def _objective():
run = wandb.run if wandb.run else wandb.init()
trainer.state.trial_name = run.name
run.config.update({"assignments": {}, "metric": metric})
config = wandb.config
trainer.objective = None
trainer.train(resume_from_checkpoint=None, trial=vars(config)["_items"])
# If there hasn't been any evaluation during the training loop.
if getattr(trainer, "objective", None) is None:
metrics = trainer.evaluate()
trainer.objective = trainer.compute_objective(metrics)
format_metrics = rewrite_logs(metrics)
if metric not in format_metrics:
logger.warning(
f"Provided metric {metric} not found. This might result in unexpected sweeps charts. The available"
f" metrics are {format_metrics.keys()}"
)
best_score = False
if best_trial["run_id"] is not None:
if direction == "minimize":
best_score = trainer.objective < best_trial["objective"]
elif direction == "maximize":
best_score = trainer.objective > best_trial["objective"]
if best_score or best_trial["run_id"] is None:
best_trial["run_id"] = run.id
best_trial["objective"] = trainer.objective
best_trial["hyperparameters"] = dict(config)
return trainer.objective
if not sweep_id:
sweep_id = wandb.sweep(sweep_config, project=project, entity=entity)
else:
import wandb.env
if entity:
wandb.env.set_entity(entity)
wandb.env.set_project(project)
logger.info(f"wandb sweep id - {sweep_id}")
wandb.agent(sweep_id, function=_objective, count=n_trials)
return BestRun(best_trial["run_id"], best_trial["objective"], best_trial["hyperparameters"], sweep_id)
def get_available_reporting_integrations():
integrations = []
if is_azureml_available() and not is_mlflow_available():
integrations.append("azure_ml")
if is_comet_available():
integrations.append("comet_ml")
if is_dagshub_available():
integrations.append("dagshub")
if is_dvclive_available():
integrations.append("dvclive")
if is_mlflow_available():
integrations.append("mlflow")
if is_neptune_available():
integrations.append("neptune")
if is_tensorboard_available():
integrations.append("tensorboard")
if is_wandb_available():
integrations.append("wandb")
if is_codecarbon_available():
integrations.append("codecarbon")
if is_clearml_available():
integrations.append("clearml")
if is_swanlab_available():
integrations.append("swanlab")
if is_trackio_available():
integrations.append("trackio")
return integrations
def rewrite_logs(d):
new_d = {}
eval_prefix = "eval_"
eval_prefix_len = len(eval_prefix)
test_prefix = "test_"
test_prefix_len = len(test_prefix)
for k, v in d.items():
if k.startswith(eval_prefix):
new_d["eval/" + k[eval_prefix_len:]] = v
elif k.startswith(test_prefix):
new_d["test/" + k[test_prefix_len:]] = v
else:
new_d["train/" + k] = v
return new_d
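# Editor's note: illustrative comment added for clarity; it is not part of the original module.
# `rewrite_logs` only namespaces metric keys for the logging integrations, e.g.
#   rewrite_logs({"eval_loss": 0.3, "test_accuracy": 0.9, "loss": 0.5})
#   -> {"eval/loss": 0.3, "test/accuracy": 0.9, "train/loss": 0.5}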
def default_logdir() -> str:
"""
Same default as PyTorch
"""
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
return os.path.join("runs", current_time + "_" + socket.gethostname())
class TensorBoardCallback(TrainerCallback):
"""
A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard).
Args:
tb_writer (`SummaryWriter`, *optional*):
The writer to use. Will instantiate one if not set.
Environment:
- **TENSORBOARD_LOGGING_DIR** (`str`, *optional*, defaults to `None`):
        The directory to which TensorBoard logs are written. Defaults to os.path.join(args.output_dir, default_logdir()).
"""
def __init__(self, tb_writer=None):
if not is_tensorboard_available():
raise RuntimeError(
"TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or"
" install tensorboardX."
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
self._SummaryWriter = SummaryWriter
self.tb_writer = tb_writer
self.logging_dir = os.getenv("TENSORBOARD_LOGGING_DIR", None)
if self.logging_dir is not None:
self.logging_dir = os.path.expanduser(self.logging_dir)
def _init_summary_writer(self, args):
if self._SummaryWriter is not None:
self.tb_writer = self._SummaryWriter(log_dir=self.logging_dir)
def on_train_begin(self, args, state, control, **kwargs):
if not state.is_world_process_zero:
return
if state.is_hyper_param_search:
trial_name = state.trial_name
if trial_name is not None:
# overwrite logging dir for trials
self.logging_dir = os.path.join(args.output_dir, default_logdir(), trial_name)
if self.logging_dir is None:
self.logging_dir = os.path.join(args.output_dir, default_logdir())
if self.tb_writer is None:
self._init_summary_writer(args)
if self.tb_writer is not None:
self.tb_writer.add_text("args", args.to_json_string())
if "model" in kwargs:
model = kwargs["model"]
if hasattr(model, "config") and model.config is not None:
model_config_json = model.config.to_json_string()
self.tb_writer.add_text("model_config", model_config_json)
def on_log(self, args, state, control, logs=None, **kwargs):
if not state.is_world_process_zero:
return
if self.tb_writer is None:
self._init_summary_writer(args)
if self.tb_writer is not None:
logs = rewrite_logs(logs)
for k, v in logs.items():
if isinstance(v, (int, float)):
self.tb_writer.add_scalar(k, v, state.global_step)
elif isinstance(v, str):
self.tb_writer.add_text(k, v, state.global_step)
else:
logger.warning(
"Trainer is attempting to log a value of "
f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
"This invocation of Tensorboard's writer.add_scalar() "
"is incorrect so we dropped this attribute."
)
self.tb_writer.flush()
def on_train_end(self, args, state, control, **kwargs):
if self.tb_writer:
self.tb_writer.close()
self.tb_writer = None
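# Editor's note: illustrative usage sketch added for clarity; it is not part of the original
# module. The callback is added automatically when `report_to` includes "tensorboard";
# passing a pre-built SummaryWriter and the log directory below are just examples.
def _tensorboard_callback_example(trainer):
    from torch.utils.tensorboard import SummaryWriter

    writer = SummaryWriter(log_dir="runs/my_experiment")  # example log dir
    trainer.add_callback(TensorBoardCallback(tb_writer=writer))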
def save_model_architecture_to_file(model: Any, output_dir: str):
with open(f"{output_dir}/model_architecture.txt", "w+") as f:
if isinstance(model, PreTrainedModel):
print(model, file=f)
elif is_torch_available() and (
isinstance(model, (torch.nn.Module, PushToHubMixin)) and hasattr(model, "base_model")
):
print(model, file=f)
class WandbLogModel(str, Enum):
"""Enum of possible log model values in W&B."""
CHECKPOINT = "checkpoint"
END = "end"
FALSE = "false"
@property
def is_enabled(self) -> bool:
"""Check if the value corresponds to a state where the `WANDB_LOG_MODEL` setting is enabled."""
return self in (WandbLogModel.CHECKPOINT, WandbLogModel.END)
@classmethod
def _missing_(cls, value: Any) -> "WandbLogModel":
if not isinstance(value, str):
raise TypeError(f"Expecting to have a string `WANDB_LOG_MODEL` setting, but got {type(value)}")
logger.warning(
f"Received unrecognized `WANDB_LOG_MODEL` setting value={value}; so disabling `WANDB_LOG_MODEL`"
)
return WandbLogModel.FALSE
class WandbCallback(TrainerCallback):
"""
    A [`TrainerCallback`] that logs metrics, media, and model checkpoints to [Weights & Biases](https://www.wandb.com/).
"""
def __init__(self):
has_wandb = is_wandb_available()
if not has_wandb:
raise RuntimeError("WandbCallback requires wandb to be installed. Run `pip install wandb`.")
import wandb
self._wandb = wandb
self._initialized = False
self._log_model = WandbLogModel(os.getenv("WANDB_LOG_MODEL", "false"))
def setup(self, args, state, model, **kwargs):
"""
        Set up the optional Weights & Biases (*wandb*) integration.
One can subclass and override this method to customize the setup if needed. Find more information
[here](https://docs.wandb.ai/guides/integrations/huggingface). You can also override the following environment
variables:
Environment:
- **WANDB_LOG_MODEL** (`str`, *optional*, defaults to `"false"`):
Whether to log model and checkpoints during training. Can be `"end"`, `"checkpoint"` or `"false"`. If set
to `"end"`, the model will be uploaded at the end of training. If set to `"checkpoint"`, the checkpoint
            will be uploaded every `args.save_steps`. If set to `"false"`, the model will not be uploaded. Use along
            with [`~transformers.TrainingArguments.load_best_model_at_end`] to upload the best model.
        - **WANDB_WATCH** (`str`, *optional*, defaults to `"false"`):
Can be `"gradients"`, `"all"`, `"parameters"`, or `"false"`. Set to `"all"` to log gradients and
parameters.
- **WANDB_PROJECT** (`str`, *optional*, defaults to `"huggingface"`):
Set this to a custom string to store results in a different project.
"""
if self._wandb is None:
return
self._initialized = True
# prepare to handle potential configuration issues during setup
from wandb.sdk.lib.config_util import ConfigError as WandbConfigError
if state.is_world_process_zero:
combined_dict = {**args.to_dict()}
if hasattr(model, "config") and model.config is not None:
model_config = model.config if isinstance(model.config, dict) else model.config.to_dict()
combined_dict = {**model_config, **combined_dict}
if hasattr(model, "peft_config") and model.peft_config is not None:
peft_config = model.peft_config
combined_dict = {"peft_config": peft_config, **combined_dict}
trial_name = state.trial_name
init_args = {}
if trial_name is not None:
init_args["name"] = trial_name
init_args["group"] = args.run_name or args.output_dir
elif args.run_name is not None:
init_args["name"] = args.run_name
if args.run_name == args.output_dir:
self._wandb.termwarn(
"The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. If this was "
"not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.",
repeat=False,
)
if self._wandb.run is None:
self._wandb.init(
project=os.getenv("WANDB_PROJECT", "huggingface"),
**init_args,
)
# add config parameters (run may have been created manually)
self._wandb.config.update(combined_dict or {}, allow_val_change=True)
# define default x-axis (for latest wandb versions)
if getattr(self._wandb, "define_metric", None):
self._wandb.define_metric("train/global_step")
self._wandb.define_metric("*", step_metric="train/global_step", step_sync=True)
# keep track of model topology and gradients, unsupported on TPU
_watch_model = os.getenv("WANDB_WATCH", "false")
if not is_torch_xla_available() and _watch_model in ("all", "parameters", "gradients"):
self._wandb.watch(model, log=_watch_model, log_freq=max(100, state.logging_steps))
self._wandb.run._label(code="transformers_trainer")
# add number of model parameters to wandb config
try:
self._wandb.config["model/num_parameters"] = model.num_parameters()
except AttributeError:
logger.info(
"Could not log the number of model parameters in Weights & Biases due to an AttributeError."
)
except WandbConfigError:
logger.warning(
"A ConfigError was raised whilst setting the number of model parameters in Weights & Biases config."
)
# log the initial model architecture to an artifact
if self._log_model.is_enabled:
with tempfile.TemporaryDirectory() as temp_dir:
model_name = (
f"model-{self._wandb.run.id}"
if (args.run_name is None or args.run_name == args.output_dir)
else f"model-{self._wandb.run.name}"
)
model_artifact = self._wandb.Artifact(
name=model_name,
type="model",
metadata={
"model_config": model.config.to_dict() if hasattr(model, "config") else None,
"num_parameters": self._wandb.config.get("model/num_parameters"),
"initial_model": True,
},
)
# add the architecture to a separate text file
save_model_architecture_to_file(model, temp_dir)
for f in Path(temp_dir).glob("*"):
if f.is_file():
with model_artifact.new_file(f.name, mode="wb") as fa:
fa.write(f.read_bytes())
self._wandb.run.log_artifact(model_artifact, aliases=["base_model"])
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/integrations/mxfp4.py | src/transformers/integrations/mxfp4.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import is_torch_available, is_torch_xpu_available, logging
if is_torch_available():
import torch
from torch import nn
from contextlib import contextmanager
from typing import Optional
from ..core_model_loading import ConversionOps
from ..quantizers.quantizers_utils import get_module_from_name, should_convert_module
logger = logging.get_logger(__name__)
FP4_VALUES = [
+0.0,
+0.5,
+1.0,
+1.5,
+2.0,
+3.0,
+4.0,
+6.0,
-0.0,
-0.5,
-1.0,
-1.5,
-2.0,
-3.0,
-4.0,
-6.0,
]
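# Editor's note: illustrative comment added for clarity; it is not part of the original module.
# Each mxfp4 nibble is an e2m1 code: the low three bits select a magnitude and the high bit the
# sign, so the 16 entries above cover every 4-bit pattern, e.g.
#   FP4_VALUES[0b0011] == 1.5   and   FP4_VALUES[0b1011] == -1.5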
@contextmanager
def on_device(dev):
if is_torch_available():
import torch
if isinstance(dev, torch.Tensor):
dev = dev.device
elif isinstance(dev, str):
dev = torch.device(dev)
dev_type = getattr(dev, "type", None)
if dev_type == "cuda":
with torch.cuda.device(dev):
yield
return
if dev_type == "xpu" and hasattr(torch, "xpu"):
with torch.xpu.device(dev):
yield
return
# other: CPU
yield
class Mxfp4Quantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: Optional[torch.nn.Module] = None,
missing_keys: Optional[list[str]] = None,
full_layer_name: str | None = None,
**kwargs,
) -> dict[str, torch.Tensor]:
_, value = tuple(input_dict.items())[0]
value = value[0] if isinstance(value, list) else value
module, _ = get_module_from_name(model, full_layer_name)
with torch.device(value.device):
if isinstance(module, Mxfp4GptOssExperts):
triton_weight_tensor, weight_scale = quantize_to_mxfp4(value.transpose(-1, -2), triton_kernels_hub)
PrecisionConfig, FlexCtx, InFlexData = (
triton_kernels_hub.matmul_ogs.PrecisionConfig,
triton_kernels_hub.matmul_ogs.FlexCtx,
triton_kernels_hub.matmul_ogs.InFlexData,
)
triton_weight_tensor, weight_scale = swizzle_mxfp4(
triton_weight_tensor, weight_scale, triton_kernels_hub
)
proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj"
if proj in module._parameters:
# Remove the nn.Parameter registration so we can attach the Triton tensor
del module._parameters[proj]
setattr(module, proj, triton_weight_tensor)
setattr(
module,
f"{proj}_precision_config",
PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())),
)
missing_keys.discard(f"{full_layer_name}")
module._is_hf_initialized = True
return {}
class Mxfp4Dequantize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: Optional[torch.nn.Module] = None,
full_layer_name: str | None = None,
missing_keys=None,
**kwargs,
) -> dict[str, torch.Tensor]:
param_data = {}
if "_blocks" in input_dict.keys():
if isinstance(input_dict["_blocks"], list):
param_data["_blocks"] = input_dict["_blocks"][0]
else:
param_data["_blocks"] = input_dict["_blocks"]
if "_scales" in input_dict.keys():
if isinstance(input_dict["_scales"], list):
param_data["_scales"] = input_dict["_scales"][0]
else:
param_data["_scales"] = input_dict["_scales"]
# Here we are dequantizing the weights
dequantized = dequantize_convertops(param_data["_blocks"], param_data["_scales"], param_data["_blocks"].device)
return {full_layer_name: dequantized}
class Mxfp4Deserialize(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: Optional[torch.nn.Module] = None,
full_layer_name: str | None = None,
missing_keys: Optional[list[str]] = None,
**kwargs,
) -> dict[str, torch.Tensor]:
param_data = {}
if "_blocks" in input_dict.keys():
if isinstance(input_dict["_blocks"], list):
param_data["_blocks"] = input_dict["_blocks"][0]
else:
param_data["_blocks"] = input_dict["_blocks"]
if "_scales" in input_dict.keys():
if isinstance(input_dict["_scales"], list):
param_data["_scales"] = input_dict["_scales"][0]
else:
param_data["_scales"] = input_dict["_scales"]
# Eagerly set tensors on the module and perform swizzle
module, _ = get_module_from_name(model, full_layer_name)
proj = "gate_up_proj" if "gate_up_proj" in full_layer_name else "down_proj"
swizzle_mxfp4_convertops(
param_data["_blocks"],
param_data["_scales"],
module,
proj,
param_data["_blocks"].device,
triton_kernels_hub,
)
missing_keys.discard(f"{full_layer_name}")
module._is_hf_initialized = True
# We return an empty mapping since the module was updated in-place. This prevents
# the loader from trying to materialize the original meta-parameter names again.
# We don't use set_param_for_module since it expects mainly a torch.nn.Parameter or a safetensors pointer
return {}
# Copied from GPT_OSS repo and vllm
def quantize_to_mxfp4(w, triton_kernels_hub):
downcast_to_mxfp_torch = triton_kernels_hub.numerics_details.mxfp.downcast_to_mxfp_torch
w, w_scale = downcast_to_mxfp_torch(w.to(torch.bfloat16), torch.uint8, axis=1)
return w, w_scale
def swizzle_mxfp4(w, w_scale, triton_kernels_hub):
"""
Changes the layout of the tensors depending on the hardware
"""
FP4, convert_layout, wrap_torch_tensor = (
triton_kernels_hub.tensor.FP4,
triton_kernels_hub.tensor.convert_layout,
triton_kernels_hub.tensor.wrap_torch_tensor,
)
layout = triton_kernels_hub.tensor_details.layout
StridedLayout = triton_kernels_hub.tensor_details.layout.StridedLayout
value_layout, value_layout_opts = layout.make_default_matmul_mxfp4_w_layout(mx_axis=1)
w = convert_layout(wrap_torch_tensor(w, dtype=FP4), value_layout, **value_layout_opts)
w_scale = convert_layout(wrap_torch_tensor(w_scale), StridedLayout)
return w, w_scale
# Copied from GPT_OSS repo
# TODO: Add absolute link when the repo is public
def convert_moe_packed_tensors(
blocks,
scales,
*,
dtype: torch.dtype = torch.bfloat16,
rows_per_chunk: int = 32768 * 1024, # TODO these values are not here by mistake ;)
) -> torch.Tensor:
"""
    Dequantize the packed mxfp4 weights and make them compatible with the forward
    pass of GPT_OSS.
"""
import math
blocks = blocks.to(torch.uint8)
# Check if blocks and scales are on CPU, and move to GPU if so
if not blocks.is_cuda and torch.cuda.is_available():
blocks = blocks.cuda()
scales = scales.cuda()
elif (blocks.device.type != "xpu") and is_torch_xpu_available():
blocks = blocks.to("xpu")
scales = scales.to("xpu")
scales = scales.to(torch.int32) - 127 # TODO that's because 128=2**7
assert blocks.shape[:-1] == scales.shape, f"{blocks.shape[:-1]=} does not match {scales.shape=}"
lut = torch.tensor(FP4_VALUES, dtype=dtype, device=blocks.device)
*prefix_shape, G, B = blocks.shape
rows_total = math.prod(prefix_shape) * G
blocks = blocks.reshape(rows_total, B)
scales = scales.reshape(rows_total, 1)
out = torch.empty(rows_total, B * 2, dtype=dtype, device=blocks.device)
for r0 in range(0, rows_total, rows_per_chunk):
r1 = min(r0 + rows_per_chunk, rows_total)
blk = blocks[r0:r1]
exp = scales[r0:r1]
# nibble indices -> int64
idx_lo = (blk & 0x0F).to(torch.long)
idx_hi = (blk >> 4).to(torch.long)
sub = out[r0:r1]
sub[:, 0::2] = lut[idx_lo]
sub[:, 1::2] = lut[idx_hi]
torch.ldexp(sub, exp, out=sub)
del idx_lo, idx_hi, blk, exp, sub
out = out.reshape(*prefix_shape, G, B * 2).view(*prefix_shape, G * B * 2)
del blocks, scales, lut
return out.transpose(1, 2).contiguous()
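# Editor's note: the helper below is an illustrative sketch added for clarity and is not part
# of the original module. A packed byte holds two fp4 codes (low nibble first), and the
# per-block exponent is applied with ldexp, mirroring the chunked loop above on a toy input.
def _mxfp4_unpack_demo():
    lut = torch.tensor(FP4_VALUES)
    byte = torch.tensor([0xB3], dtype=torch.uint8)  # low nibble 0x3 -> 1.5, high nibble 0xB -> -1.5
    exponent = torch.tensor([1])                    # a stored scale of 128 becomes 128 - 127 = 1
    lo = lut[(byte & 0x0F).long()]
    hi = lut[(byte >> 4).long()]
    values = torch.stack([lo, hi], dim=-1).reshape(-1)
    return torch.ldexp(values, exponent)            # -> tensor([ 3., -3.])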
class Mxfp4GptOssExperts(nn.Module):
def __init__(self, config):
super().__init__()
self.num_experts = config.num_local_experts
self.intermediate_size = config.intermediate_size
self.hidden_size = config.hidden_size
self.gate_up_proj = nn.Parameter(
torch.zeros(self.num_experts, 2 * self.intermediate_size, self.hidden_size // 32, 16, dtype=torch.uint8),
requires_grad=False,
)
self.gate_up_proj_bias = nn.Parameter(
torch.zeros(self.num_experts, 2 * self.intermediate_size, dtype=torch.float32), requires_grad=False
)
self.down_proj = nn.Parameter(
torch.zeros((self.num_experts, self.hidden_size, self.intermediate_size // 32, 16), dtype=torch.uint8),
requires_grad=False,
)
self.down_proj_bias = nn.Parameter(
torch.zeros(self.num_experts, self.hidden_size, dtype=torch.float32), requires_grad=False
)
self.alpha = 1.702
self.limit = getattr(config, "swiglu_limit", 7.0)
self.gate_up_proj_precision_config = None
self.down_proj_precision_config = None
self.limit = getattr(config, "swiglu_limit", 7.0)
def forward(self, hidden_states: torch.Tensor, routing_data, gather_idx, scatter_idx) -> torch.Tensor:
FnSpecs, FusedActivation, matmul_ogs = (
triton_kernels_hub.matmul_ogs.FnSpecs,
triton_kernels_hub.matmul_ogs.FusedActivation,
triton_kernels_hub.matmul_ogs.matmul_ogs,
)
swiglu_fn = triton_kernels_hub.swiglu.swiglu_fn
with on_device(hidden_states.device):
act = FusedActivation(FnSpecs("swiglu", swiglu_fn, ("alpha", "limit")), (self.alpha, self.limit), 2)
intermediate_cache1 = matmul_ogs(
hidden_states,
self.gate_up_proj,
self.gate_up_proj_bias.to(torch.float32),
routing_data,
gather_indx=gather_idx,
precision_config=self.gate_up_proj_precision_config,
gammas=None,
fused_activation=act,
)
intermediate_cache3 = matmul_ogs(
intermediate_cache1,
self.down_proj,
self.down_proj_bias.to(torch.float32),
routing_data,
scatter_indx=scatter_idx,
precision_config=self.down_proj_precision_config,
gammas=routing_data.gate_scal,
)
return intermediate_cache3
# Adapted from GPT_OSS repo
# TODO: Add absolute link when the repo is public
def routing_torch_dist(
logits,
n_expts_act,
):
import os
GatherIndx, RoutingData, ScatterIndx, compute_expt_data_torch = (
triton_kernels_hub.routing.GatherIndx,
triton_kernels_hub.routing.RoutingData,
triton_kernels_hub.routing.ScatterIndx,
triton_kernels_hub.routing.compute_expt_data_torch,
)
with on_device(logits.device):
world_size = torch.distributed.get_world_size()
rank = int(os.environ.get("LOCAL_RANK", "0"))
replace_value = -1
n_tokens = logits.shape[0]
n_expts_tot = logits.shape[1]
n_local_experts = n_expts_tot // world_size
local_expert_start = rank * n_local_experts
local_expert_end = (rank + 1) * n_local_experts
n_gates_pad = n_tokens * n_expts_act
def topk(vals, k):
tk_indx = torch.argsort(-vals, dim=1, stable=True)[:, :k]
tk_indx = tk_indx.long()
tk_val = torch.take_along_dim(vals, tk_indx, dim=1)
return tk_val, tk_indx.int()
expt_scal, expt_indx = topk(logits, n_expts_act)
expt_scal = torch.softmax(expt_scal, dim=-1)
expt_indx, sort_indices = torch.sort(expt_indx, dim=1)
expt_scal = torch.gather(expt_scal, 1, sort_indices)
# Flatten and mask for local experts
expt_scal = expt_scal.reshape(-1)
hist = torch.histc(expt_indx, bins=n_expts_tot, max=n_expts_tot - 1)[local_expert_start:local_expert_end]
expt_indx = expt_indx.view(-1).to(torch.int32)
# we use a large value to replace the indices that are not in the local expert range
var = 1000
expt_indx = torch.where(expt_indx < local_expert_start, var, expt_indx)
topk_indx = torch.argsort(expt_indx, stable=True).to(torch.int32)
gate_indx = torch.argsort(topk_indx).to(torch.int32)
expt_indx = torch.where(expt_indx < local_expert_end, expt_indx, replace_value)
expt_indx = torch.where(local_expert_start <= expt_indx, expt_indx, replace_value)
gate_indx = torch.where(expt_indx == replace_value, replace_value, gate_indx)
gate_scal = expt_scal[topk_indx]
topk_indx = torch.where(gate_indx[topk_indx] == replace_value, replace_value, topk_indx)
        # Routing metadata for local expert computation
gather_indx = GatherIndx(src_indx=topk_indx.int(), dst_indx=gate_indx.int())
scatter_indx = ScatterIndx(src_indx=gate_indx.int(), dst_indx=topk_indx.int())
expt_data = compute_expt_data_torch(hist, n_local_experts, n_gates_pad)
hit_experts = n_expts_act
return RoutingData(gate_scal, hist, n_local_experts, hit_experts, expt_data), gather_indx, scatter_indx
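# Editor's note: the helper below is an illustrative sketch added for clarity and is not part
# of the original module. It only shows the gating arithmetic used above (top-k over the router
# logits, then softmax over the selected scores); the local-expert masking and the Triton
# routing metadata are omitted.
def _topk_routing_demo():
    logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])  # one token, four experts
    scores, expert_indices = torch.topk(logits, k=2, dim=1)
    gate_weights = torch.softmax(scores, dim=-1)    # weights over the two selected experts
    return expert_indices, gate_weights             # -> experts [[0, 2]], weights ~[[0.73, 0.27]]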
def mlp_forward(self, hidden_states):
import torch.distributed as dist
if dist.is_available() and dist.is_initialized() and hasattr(self, "_is_hooked"):
routing = routing_torch_dist
else:
routing = triton_kernels_hub.routing.routing
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.router.hidden_dim)
router_logits = nn.functional.linear(hidden_states, self.router.weight, self.router.bias)
with on_device(router_logits.device):
routing_data, gather_idx, scatter_idx = routing(router_logits, self.router.top_k)
routed_out = self.experts(hidden_states, routing_data, gather_idx, scatter_idx)
routed_out = routed_out.reshape(batch_size, -1, self.router.hidden_dim)
return routed_out, router_logits
def dequantize(module, param_name, param_value, target_device, dq_param_name, **kwargs):
from ..integrations.tensor_parallel import shard_and_distribute_module
model = kwargs.get("model")
empty_param = kwargs.get("empty_param")
casting_dtype = kwargs.get("casting_dtype")
to_contiguous = kwargs.get("to_contiguous")
rank = kwargs.get("rank")
device_mesh = kwargs.get("device_mesh")
for proj in ["gate_up_proj", "down_proj"]:
if proj in param_name:
if device_mesh is not None:
param_value = shard_and_distribute_module(
model,
param_value,
empty_param,
dq_param_name,
casting_dtype,
to_contiguous,
rank,
device_mesh,
)
blocks_attr = f"{proj}_blocks"
scales_attr = f"{proj}_scales"
setattr(module, param_name.rsplit(".", 1)[1], param_value)
if hasattr(module, blocks_attr) and hasattr(module, scales_attr):
dequantized = convert_moe_packed_tensors(getattr(module, blocks_attr), getattr(module, scales_attr))
if target_device == "cpu" and torch.cuda.is_available():
torch.cuda.empty_cache()
elif target_device == "cpu" and is_torch_xpu_available():
torch.xpu.empty_cache()
setattr(module, proj, torch.nn.Parameter(dequantized.to(target_device)))
delattr(module, blocks_attr)
delattr(module, scales_attr)
def dequantize_convertops(blocks, scales, target_device):
dequantized = convert_moe_packed_tensors(blocks, scales)
if target_device == "cpu" and torch.cuda.is_available():
torch.cuda.empty_cache()
dequantized = torch.nn.Parameter(dequantized.to(target_device))
return dequantized
def load_and_swizzle_mxfp4(module, param_name, param_value, target_device, triton_kernels_hub, **kwargs):
"""
This transforms the weights obtained using `convert_gpt_oss.py` to load them into `Mxfp4GptOssExperts`.
"""
PrecisionConfig, FlexCtx, InFlexData = (
triton_kernels_hub.matmul_ogs.PrecisionConfig,
triton_kernels_hub.matmul_ogs.FlexCtx,
triton_kernels_hub.matmul_ogs.InFlexData,
)
from ..integrations.tensor_parallel import shard_and_distribute_module
model = kwargs.get("model")
empty_param = kwargs.get("empty_param")
casting_dtype = kwargs.get("casting_dtype")
to_contiguous = kwargs.get("to_contiguous")
rank = kwargs.get("rank")
device_mesh = kwargs.get("device_mesh")
if "blocks" in param_name:
proj = param_name.split(".")[-1].split("_blocks")[0]
if "scales" in param_name:
proj = param_name.split(".")[-1].split("_scales")[0]
if device_mesh is not None:
shard_and_distribute_module(
model, param_value, empty_param, param_name, casting_dtype, to_contiguous, rank, device_mesh
)
else:
setattr(module, param_name.rsplit(".", 1)[1], torch.nn.Parameter(param_value, requires_grad=False))
blocks_attr = f"{proj}_blocks"
scales_attr = f"{proj}_scales"
blocks = getattr(module, blocks_attr) # at this point values were loaded from ckpt
scales = getattr(module, scales_attr)
    # Check that neither blocks nor scales are still on the meta device
if blocks.device.type != "meta" and scales.device.type != "meta":
local_experts = blocks.size(0)
if proj == "gate_up_proj":
blocks = blocks.reshape(local_experts, module.intermediate_size * 2, -1)
else:
blocks = blocks.reshape(local_experts, -1, module.intermediate_size // 2)
if getattr(target_device, "type", target_device) == "cpu":
target_device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
blocks = blocks.to(target_device).contiguous()
scales = scales.to(target_device).contiguous()
with on_device(target_device):
triton_weight_tensor, weight_scale = swizzle_mxfp4(
blocks.transpose(-2, -1), scales.transpose(-2, -1), triton_kernels_hub
)
# need to overwrite the shapes for the kernels
if proj == "gate_up_proj":
triton_weight_tensor.shape = torch.Size([local_experts, module.hidden_size, module.intermediate_size * 2])
else:
triton_weight_tensor.shape = torch.Size([local_experts, module.intermediate_size, module.hidden_size])
        # triton_weight_tensor is what gets passed to the OAI kernels. It stores the data, the shapes and any related metadata; it behaves like a sub-tensor.
setattr(module, proj, triton_weight_tensor)
setattr(
module,
f"{proj}_precision_config",
PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())),
)
# delete blocks and scales
delattr(module, scales_attr)
delattr(module, blocks_attr)
del blocks
def swizzle_mxfp4_convertops(blocks, scales, module, proj, target_device, triton_kernels_hub):
"""
This transforms the weights obtained using `convert_gpt_oss.py` to load them into `Mxfp4GptOssExperts`.
"""
PrecisionConfig, FlexCtx, InFlexData = (
triton_kernels_hub.matmul_ogs.PrecisionConfig,
triton_kernels_hub.matmul_ogs.FlexCtx,
triton_kernels_hub.matmul_ogs.InFlexData,
)
local_experts = blocks.size(0)
if getattr(target_device, "type", target_device) == "cpu":
target_device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
blocks = blocks.to(target_device).contiguous()
scales = scales.to(target_device).contiguous()
if proj == "gate_up_proj":
blocks = blocks.reshape(local_experts, module.intermediate_size * 2, -1)
else:
blocks = blocks.reshape(local_experts, -1, module.intermediate_size // 2)
if getattr(target_device, "type", target_device) == "cpu":
target_device = "cuda"
with on_device(target_device):
triton_weight_tensor, weight_scale = swizzle_mxfp4(
blocks.transpose(-2, -1), scales.transpose(-2, -1), triton_kernels_hub
)
# need to overwrite the shapes for the kernels
if proj == "gate_up_proj":
triton_weight_tensor.shape = torch.Size([local_experts, module.hidden_size, module.intermediate_size * 2])
else:
triton_weight_tensor.shape = torch.Size([local_experts, module.intermediate_size, module.hidden_size])
    # triton_weight_tensor is what gets passed to the OAI kernels. It stores the data, the shapes and any related metadata; it behaves like a sub-tensor.
# Since the Experts module registers gate_up_proj and down_proj as nn.Parameters, we need to remove them so we can attach the Triton tensor
if proj in module._parameters:
# Remove the nn.Parameter registration so we can attach the Triton tensor
del module._parameters[proj]
setattr(module, proj, triton_weight_tensor)
setattr(
module,
f"{proj}_precision_config",
PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())),
)
def replace_with_mxfp4_linear(model, quantization_config=None, modules_to_not_convert: list[str] | None = None):
"""
Public method that replaces the expert layers of the given model with mxfp4 quantized layers.
Args:
model (`torch.nn.Module`):
The model to convert, can be any `torch.nn.Module` instance.
quantization_config (`Mxfp4Config`, defaults to `None`):
The quantization config object that contains the quantization parameters.
modules_to_not_convert (`list`, *optional*, defaults to `None`):
A list of modules to not convert. If a module name is in the list (e.g. `lm_head`), it will not be
converted.
"""
if quantization_config.dequantize:
return model
from .hub_kernels import get_kernel
global triton_kernels_hub
triton_kernels_hub = get_kernel("kernels-community/triton_kernels")
has_been_replaced = False
for module_name, module in model.named_modules():
if not should_convert_module(module_name, modules_to_not_convert):
continue
if module.__class__.__name__ == "GptOssExperts" and not quantization_config.dequantize:
with torch.device("meta"):
model.set_submodule(module_name, Mxfp4GptOssExperts(model.config))
has_been_replaced = True
if module.__class__.__name__ == "GptOssMLP" and not quantization_config.dequantize:
from types import MethodType
module.forward = MethodType(mlp_forward, module)
if not has_been_replaced:
logger.warning(
"You are loading your model using mixed-precision FP4 quantization but no linear modules were found in your model."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug."
)
return model
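# Editor's note: illustrative usage sketch added for clarity; it is not part of the original
# module. The replacement above is normally triggered by the quantizer when loading an
# mxfp4-quantized checkpoint; the checkpoint id below is only an example.
def _mxfp4_loading_example():
    from transformers import AutoModelForCausalLM, Mxfp4Config

    quantization_config = Mxfp4Config(dequantize=False)
    return AutoModelForCausalLM.from_pretrained(
        "openai/gpt-oss-20b",  # example mxfp4 checkpoint
        quantization_config=quantization_config,
        device_map="auto",
    )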
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/__init__.py | src/transformers/models/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..utils import _LazyModule
from ..utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .afmoe import *
from .aimv2 import *
from .albert import *
from .align import *
from .altclip import *
from .apertus import *
from .arcee import *
from .aria import *
from .audio_spectrogram_transformer import *
from .audioflamingo3 import *
from .auto import *
from .autoformer import *
from .aya_vision import *
from .bamba import *
from .bark import *
from .bart import *
from .barthez import *
from .bartpho import *
from .beit import *
from .bert import *
from .bert_generation import *
from .bert_japanese import *
from .bertweet import *
from .big_bird import *
from .bigbird_pegasus import *
from .biogpt import *
from .bit import *
from .bitnet import *
from .blenderbot import *
from .blenderbot_small import *
from .blip import *
from .blip_2 import *
from .bloom import *
from .blt import *
from .bridgetower import *
from .bros import *
from .byt5 import *
from .camembert import *
from .canine import *
from .chameleon import *
from .chinese_clip import *
from .clap import *
from .clip import *
from .clipseg import *
from .clvp import *
from .code_llama import *
from .codegen import *
from .cohere import *
from .cohere2 import *
from .cohere2_vision import *
from .colpali import *
from .colqwen2 import *
from .conditional_detr import *
from .convbert import *
from .convnext import *
from .convnextv2 import *
from .cpm import *
from .cpmant import *
from .csm import *
from .ctrl import *
from .cvt import *
from .cwm import *
from .d_fine import *
from .dab_detr import *
from .dac import *
from .data2vec import *
from .dbrx import *
from .deberta import *
from .deberta_v2 import *
from .decision_transformer import *
from .deepseek_v2 import *
from .deepseek_v3 import *
from .deepseek_vl import *
from .deepseek_vl_hybrid import *
from .deformable_detr import *
from .deit import *
from .deprecated import *
from .depth_anything import *
from .depth_pro import *
from .detr import *
from .dia import *
from .dialogpt import *
from .diffllama import *
from .dinat import *
from .dinov2 import *
from .dinov2_with_registers import *
from .dinov3_convnext import *
from .dinov3_vit import *
from .distilbert import *
from .dit import *
from .doge import *
from .donut import *
from .dots1 import *
from .dpr import *
from .dpt import *
from .edgetam import *
from .edgetam_video import *
from .efficientloftr import *
from .efficientnet import *
from .electra import *
from .emu3 import *
from .encodec import *
from .encoder_decoder import *
from .ernie import *
from .ernie4_5 import *
from .ernie4_5_moe import *
from .esm import *
from .evolla import *
from .exaone4 import *
from .falcon import *
from .falcon_h1 import *
from .falcon_mamba import *
from .fast_vlm import *
from .fastspeech2_conformer import *
from .flaubert import *
from .flava import *
from .flex_olmo import *
from .florence2 import *
from .fnet import *
from .focalnet import *
from .fsmt import *
from .funnel import *
from .fuyu import *
from .gemma import *
from .gemma2 import *
from .gemma3 import *
from .gemma3n import *
from .git import *
from .glm import *
from .glm4 import *
from .glm4_moe import *
from .glm4v import *
from .glm4v_moe import *
from .glm46v import *
from .glmasr import *
from .glpn import *
from .got_ocr2 import *
from .gpt2 import *
from .gpt_bigcode import *
from .gpt_neo import *
from .gpt_neox import *
from .gpt_neox_japanese import *
from .gpt_oss import *
from .gpt_sw3 import *
from .gptj import *
from .granite import *
from .granite_speech import *
from .granitemoe import *
from .granitemoehybrid import *
from .granitemoeshared import *
from .grounding_dino import *
from .groupvit import *
from .helium import *
from .herbert import *
from .hgnet_v2 import *
from .hiera import *
from .hubert import *
from .hunyuan_v1_dense import *
from .hunyuan_v1_moe import *
from .ibert import *
from .idefics import *
from .idefics2 import *
from .idefics3 import *
from .ijepa import *
from .imagegpt import *
from .informer import *
from .instructblip import *
from .instructblipvideo import *
from .internvl import *
from .jamba import *
from .janus import *
from .jetmoe import *
from .kosmos2 import *
from .kyutai_speech_to_text import *
from .lasr import *
from .layoutlm import *
from .layoutlmv2 import *
from .layoutlmv3 import *
from .layoutxlm import *
from .led import *
from .levit import *
from .lfm2 import *
from .lfm2_moe import *
from .lfm2_vl import *
from .lightglue import *
from .lilt import *
from .llama import *
from .llama4 import *
from .llava import *
from .llava_next import *
from .llava_next_video import *
from .llava_onevision import *
from .longcat_flash import *
from .longformer import *
from .longt5 import *
from .luke import *
from .lxmert import *
from .m2m_100 import *
from .mamba import *
from .mamba2 import *
from .marian import *
from .markuplm import *
from .mask2former import *
from .maskformer import *
from .mbart import *
from .mbart50 import *
from .megatron_bert import *
from .megatron_gpt2 import *
from .mgp_str import *
from .mimi import *
from .minimax import *
from .ministral import *
from .ministral3 import *
from .mistral import *
from .mistral3 import *
from .mixtral import *
from .mlcd import *
from .mllama import *
from .mluke import *
from .mobilebert import *
from .mobilenet_v1 import *
from .mobilenet_v2 import *
from .mobilevit import *
from .mobilevitv2 import *
from .modernbert import *
from .modernbert_decoder import *
from .moonshine import *
from .moshi import *
from .mpnet import *
from .mpt import *
from .mra import *
from .mt5 import *
from .musicgen import *
from .musicgen_melody import *
from .mvp import *
from .myt5 import *
from .nanochat import *
from .nemotron import *
from .nllb import *
from .nllb_moe import *
from .nougat import *
from .nystromformer import *
from .olmo import *
from .olmo2 import *
from .olmo3 import *
from .olmoe import *
from .omdet_turbo import *
from .oneformer import *
from .openai import *
from .opt import *
from .ovis2 import *
from .owlv2 import *
from .owlvit import *
from .paddleocr_vl import *
from .paligemma import *
from .parakeet import *
from .patchtsmixer import *
from .patchtst import *
from .pegasus import *
from .pegasus_x import *
from .perceiver import *
from .perception_lm import *
from .persimmon import *
from .phi import *
from .phi3 import *
from .phi4_multimodal import *
from .phimoe import *
from .phobert import *
from .pix2struct import *
from .pixio import *
from .pixtral import *
from .plbart import *
from .poolformer import *
from .pop2piano import *
from .prompt_depth_anything import *
from .prophetnet import *
from .pvt import *
from .pvt_v2 import *
from .qwen2 import *
from .qwen2_5_omni import *
from .qwen2_5_vl import *
from .qwen2_audio import *
from .qwen2_moe import *
from .qwen2_vl import *
from .qwen3 import *
from .qwen3_moe import *
from .qwen3_next import *
from .qwen3_omni_moe import *
from .qwen3_vl import *
from .qwen3_vl_moe import *
from .rag import *
from .recurrent_gemma import *
from .reformer import *
from .regnet import *
from .rembert import *
from .resnet import *
from .roberta import *
from .roberta_prelayernorm import *
from .roc_bert import *
from .roformer import *
from .rt_detr import *
from .rt_detr_v2 import *
from .rwkv import *
from .sam import *
from .sam2 import *
from .sam2_video import *
from .sam3_tracker import *
from .sam3_tracker_video import *
from .sam_hq import *
from .seamless_m4t import *
from .seamless_m4t_v2 import *
from .seed_oss import *
from .segformer import *
from .seggpt import *
from .sew import *
from .sew_d import *
from .shieldgemma2 import *
from .siglip import *
from .siglip2 import *
from .smollm3 import *
from .smolvlm import *
from .speech_encoder_decoder import *
from .speech_to_text import *
from .speecht5 import *
from .splinter import *
from .squeezebert import *
from .stablelm import *
from .starcoder2 import *
from .superglue import *
from .superpoint import *
from .swiftformer import *
from .swin import *
from .swin2sr import *
from .swinv2 import *
from .switch_transformers import *
from .t5 import *
from .t5gemma import *
from .t5gemma2 import *
from .table_transformer import *
from .tapas import *
from .textnet import *
from .time_series_transformer import *
from .timesfm import *
from .timesformer import *
from .timm_backbone import *
from .timm_wrapper import *
from .trocr import *
from .tvp import *
from .udop import *
from .umt5 import *
from .unispeech import *
from .unispeech_sat import *
from .univnet import *
from .upernet import *
from .vaultgemma import *
from .video_llama_3 import *
from .video_llava import *
from .videomae import *
from .vilt import *
from .vipllava import *
from .vision_encoder_decoder import *
from .vision_text_dual_encoder import *
from .visual_bert import *
from .vit import *
from .vit_mae import *
from .vit_msn import *
from .vitdet import *
from .vitmatte import *
from .vitpose import *
from .vitpose_backbone import *
from .vits import *
from .vivit import *
from .vjepa2 import *
from .voxtral import *
from .wav2vec2 import *
from .wav2vec2_bert import *
from .wav2vec2_conformer import *
from .wav2vec2_phoneme import *
from .wav2vec2_with_lm import *
from .wavlm import *
from .whisper import *
from .x_clip import *
from .xcodec import *
from .xglm import *
from .xlm import *
from .xlm_roberta import *
from .xlm_roberta_xl import *
from .xlnet import *
from .xlstm import *
from .xmod import *
from .yolos import *
from .yoso import *
from .zamba import *
from .zamba2 import *
from .zoedepth import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py | src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py | # Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Ernie 4.5 MoE model."""
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...cache_utils import Cache, DynamicCache
from ...masking_utils import create_causal_mask
from ...modeling_outputs import MoeModelOutputWithPast
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import OutputRecorder, check_model_inputs, maybe_autocast
from ..ernie4_5.modeling_ernie4_5 import Ernie4_5RotaryEmbedding, apply_rotary_pos_emb, rotate_half # noqa: F401
from ..llama.modeling_llama import LlamaAttention, LlamaRMSNorm
from ..mixtral.modeling_mixtral import (
MixtralExperts,
MixtralForCausalLM,
MixtralPreTrainedModel,
)
from ..qwen3_moe.modeling_qwen3_moe import Qwen3MoeDecoderLayer, Qwen3MoeMLP
from .configuration_ernie4_5_moe import Ernie4_5_MoeConfig
logger = logging.get_logger(__name__)
class Ernie4_5_MoeRMSNorm(LlamaRMSNorm):
pass
class Ernie4_5_MoeMLP(Qwen3MoeMLP):
def __init__(self, config, intermediate_size=None):
super().__init__(config, intermediate_size)
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
class Ernie4_5_MoeRotaryEmbedding(Ernie4_5RotaryEmbedding):
def __init__(self, config: Ernie4_5_MoeConfig, device=None):
super().__init__(config, device)
class Ernie4_5_MoeAttention(LlamaAttention):
def __init__(self, config: Ernie4_5_MoeConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.attention_dropout = 0.0
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
class Ernie4_5_MoeStatics(nn.Module):
"""
Stores MoE (Mixture of Experts) statistics
- Bias for the gating
- Additionally, usage per expert in the original codebase
"""
def __init__(self, config):
super().__init__()
num_experts_groups = 1
num_experts = config.moe_num_experts
self.e_score_correction_bias = nn.Parameter(
torch.zeros(num_experts_groups, num_experts, dtype=torch.float32),
requires_grad=False,
)
def forward(self, hidden_states):
# NOTE: This is a workaround to enable TP with a module that only has parameters
#
# Otherwise, it stays as `DTensor` when called in the "super" forward
# 1. All other tensors are local (`torch.Tensor`)
# 2. Isolate does not work on `nn.Module` which only has parameters
return hidden_states + self.e_score_correction_bias.squeeze()
class Ernie4_5_MoeExperts(MixtralExperts):
def __init__(self, config):
super().__init__(config)
self.num_experts = config.moe_num_experts
self.intermediate_dim = config.moe_intermediate_size
def forward(
self,
hidden_states: torch.Tensor,
top_k_index: torch.Tensor,
top_k_weights: torch.Tensor,
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
expert_mask = expert_mask.permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_idx = expert_idx[0]
if expert_idx == self.num_experts:
continue
top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
current_state = hidden_states[token_idx]
gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
return final_hidden_states
class Ernie4_5_MoeTopKRouter(nn.Module):
def __init__(self, config):
super().__init__()
self.weight = nn.Parameter(torch.zeros(config.moe_num_experts, config.hidden_size, dtype=torch.float32))
self.moe_statics = Ernie4_5_MoeStatics(config)
self.top_k = config.moe_k
self.norm_min = config.moe_norm_min
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
device_type = (
hidden_states.device.type
if isinstance(hidden_states.device.type, str) and hidden_states.device.type != "mps"
else "cpu"
)
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
router_logits = F.linear(hidden_states.float(), self.weight)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
_, selected_experts = torch.topk(self.moe_statics(routing_weights), self.top_k, dim=-1)
routing_weights = torch.gather(routing_weights, dim=-1, index=selected_experts)
routing_weights = routing_weights / torch.clamp(
routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min
)
routing_weights = routing_weights.to(hidden_states.dtype)
return router_logits, selected_experts, routing_weights
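# Routing sketch (illustrative numbers only, assuming 4 experts and top-2 routing):
#   router_logits  = [ 2.0, 0.5, 0.1, -1.0 ]             (one token)
#   softmax probs  ~ [ 0.70, 0.16, 0.11, 0.03 ]
#   top-2 experts  = {0, 1}   (the zero-initialised correction bias only affects
#                              which experts are picked, not their weights)
#   renormalised   ~ [ 0.81, 0.19 ]   (each kept prob divided by their sum, clamped by norm_min)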
class Ernie4_5_MoeSparseMoeBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.hidden_dim = config.hidden_size
self.num_experts = config.moe_num_experts
self.top_k = config.moe_k
self.gate = Ernie4_5_MoeTopKRouter(config)
self.experts = Ernie4_5_MoeExperts(config)
self.shared_experts = None
if config.moe_num_shared_experts > 0:
self.shared_experts = Ernie4_5_MoeMLP(config, config.moe_intermediate_size * config.moe_num_shared_experts)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, sequence_length, _ = hidden_states.shape
hidden_states = hidden_states.view(-1, self.hidden_dim)
if self.shared_experts is not None:
shared_output = self.shared_experts(hidden_states)
_, top_k_index, top_k_weights = self.gate(hidden_states)
final_hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
if self.shared_experts is not None:
final_hidden_states = final_hidden_states + shared_output
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, self.hidden_dim)
return final_hidden_states.to(hidden_states.dtype)
class Ernie4_5_MoeDecoderLayer(Qwen3MoeDecoderLayer):
def __init__(self, config, layer_idx):
nn.Module.__init__(self)
self.hidden_size = config.hidden_size
self.self_attn = Ernie4_5_MoeAttention(config, layer_idx)
if (
((layer_idx + 1) % config.moe_layer_interval == 0)
and layer_idx >= config.moe_layer_start_index
and layer_idx <= config.moe_layer_end_index
):
self.mlp = Ernie4_5_MoeSparseMoeBlock(config)
else:
self.mlp = Ernie4_5_MoeMLP(config)
self.input_layernorm = Ernie4_5_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
self.post_attention_layernorm = Ernie4_5_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
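# Placement example for the condition above (hypothetical values): with
# moe_layer_interval=2, moe_layer_start_index=1 and moe_layer_end_index=5,
# layers 1, 3 and 5 get the sparse MoE block while every other layer keeps the
# dense Ernie4_5_MoeMLP. With the config defaults (interval=1, start=1) only
# layer 0 stays dense.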
@auto_docstring
class Ernie4_5_MoePreTrainedModel(MixtralPreTrainedModel):
config: Ernie4_5_MoeConfig
_no_split_modules = ["Ernie4_5_MoeDecoderLayer"]
# Not supporting multi-token prediction (MTP) atm
_keys_to_ignore_on_load_unexpected = ["mtp"]
_can_record_outputs = {
"router_logits": OutputRecorder(Ernie4_5_MoeTopKRouter, index=0),
"hidden_states": Ernie4_5_MoeDecoderLayer,
"attentions": Ernie4_5_MoeAttention,
}
_keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"]
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
if isinstance(module, Ernie4_5_MoeStatics):
init.zeros_(module.e_score_correction_bias)
elif isinstance(module, Ernie4_5_MoeExperts):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
class Ernie4_5_MoeModel(Ernie4_5_MoePreTrainedModel):
def __init__(self, config: Ernie4_5_MoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Ernie4_5_MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Ernie4_5_MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Ernie4_5_MoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
class Ernie4_5_MoeForCausalLM(MixtralForCausalLM):
def __init__(self, config):
PreTrainedModel.__init__(self, config)
self.model = Ernie4_5_MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=config.use_bias)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.moe_num_experts
self.num_experts_per_tok = config.moe_k
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, **super_kwargs):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
super().forward(**super_kwargs)
__all__ = [
"Ernie4_5_MoeForCausalLM",
"Ernie4_5_MoeModel",
"Ernie4_5_MoePreTrainedModel",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/ernie4_5_moe/__init__.py | src/transformers/models/ernie4_5_moe/__init__.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_ernie4_5_moe import *
from .modeling_ernie4_5_moe import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py | src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py | # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_ernie4_5_moe.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernelized_func
from ...masking_utils import create_causal_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import OutputRecorder, check_model_inputs, maybe_autocast
from .configuration_ernie4_5_moe import Ernie4_5_MoeConfig
@use_kernel_forward_from_hub("RMSNorm")
class Ernie4_5_MoeRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Ernie4_5_MoeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class Ernie4_5_MoeMLP(nn.Module):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class Ernie4_5_MoeRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Ernie4_5_MoeConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: Optional[Ernie4_5_MoeConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
# keeping it in full precision
return cos, sin
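# Shape sketch for the rotary embedding above (illustrative, head_dim = 128):
#   inv_freq : [64]                  = 1 / rope_theta ** (arange(0, 128, 2) / 128)
#   freqs    : [batch, seq_len, 64]  (inv_freq outer-product with the position ids)
#   cos, sin : [batch, seq_len, 128] (freqs duplicated by cat((freqs, freqs), -1)),
#              kept in float32 and scaled by attention_scaling (1.0 for default RoPE).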
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., 0::2]
x2 = x[..., 1::2]
return torch.stack((-x2, x1), dim=-1).flatten(-2)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
# glm rope style (with full dim) and full precision
original_dtype = q.dtype
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
# Interleave them instead of usual shape
cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1)
sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1)
q_embed = (q.float() * cos) + (rotate_half(q).float() * sin)
k_embed = (k.float() * cos) + (rotate_half(k).float() * sin)
return q_embed.to(original_dtype), k_embed.to(original_dtype)
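# Worked example of the interleaved rotation above: for a channel pair (q0, q1) and
# angle t0, rotate_half gives (-q1, q0) and the repeat-interleaved cos/sin give
#   q_embed[0] = q0 * cos(t0) - q1 * sin(t0)
#   q_embed[1] = q1 * cos(t0) + q0 * sin(t0)
# i.e. each adjacent (even, odd) channel pair is rotated as a 2-D vector, instead of
# pairing channel i with channel i + head_dim // 2 as in the usual split-half RoPE.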
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
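# Shape sketch (using this model's defaults of 20 attention heads and 4 KV heads,
# i.e. n_rep = 5): key/value go from [batch, 4, seq_len, head_dim] to
# [batch, 20, seq_len, head_dim] so they can be matmul'd against the query heads.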
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
@use_kernelized_func(apply_rotary_pos_emb)
class Ernie4_5_MoeAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Ernie4_5_MoeConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = 0.0
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class Ernie4_5_MoeStatics(nn.Module):
"""
Stores MoE (Mixture of Experts) statistics
- Bias for the gating
- Additionally, usage per expert in the original codebase
"""
def __init__(self, config):
super().__init__()
num_experts_groups = 1
num_experts = config.moe_num_experts
self.e_score_correction_bias = nn.Parameter(
torch.zeros(num_experts_groups, num_experts, dtype=torch.float32),
requires_grad=False,
)
def forward(self, hidden_states):
# NOTE: This is a workaround to enable TP with a module that only has parameters
#
# Otherwise, it stays as `DTensor` when called in the "super" forward
# 1. All other tensors are local (`torch.Tensor`)
# 2. Isolate does not work on `nn.Module` which only has parameters
return hidden_states + self.e_score_correction_bias.squeeze()
class Ernie4_5_MoeExperts(nn.Module):
"""Collection of expert weights stored as 3D tensors."""
def __init__(self, config):
super().__init__()
self.num_experts = config.moe_num_experts
self.hidden_dim = config.hidden_size
self.intermediate_dim = config.moe_intermediate_size
self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
self.act_fn = ACT2FN[config.hidden_act]
def forward(
self,
hidden_states: torch.Tensor,
top_k_index: torch.Tensor,
top_k_weights: torch.Tensor,
) -> torch.Tensor:
final_hidden_states = torch.zeros_like(hidden_states)
with torch.no_grad():
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
expert_mask = expert_mask.permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_idx = expert_idx[0]
if expert_idx == self.num_experts:
continue
top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
current_state = hidden_states[token_idx]
gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
current_hidden_states = self.act_fn(gate) * up
current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
return final_hidden_states
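# Dispatch sketch for the loop above: expert_mask is one_hot(top_k_index) permuted to
# [num_experts, top_k, num_tokens]; expert_hit lists the experts that received at least
# one routed token. For each hit expert, token_idx gathers its tokens, gate_up_proj[e] /
# down_proj[e] run the expert MLP, and the result (scaled by the token's routing weight)
# is accumulated back per token with index_add_.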
class Ernie4_5_MoeTopKRouter(nn.Module):
def __init__(self, config):
super().__init__()
self.weight = nn.Parameter(torch.zeros(config.moe_num_experts, config.hidden_size, dtype=torch.float32))
self.moe_statics = Ernie4_5_MoeStatics(config)
self.top_k = config.moe_k
self.norm_min = config.moe_norm_min
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
device_type = (
hidden_states.device.type
if isinstance(hidden_states.device.type, str) and hidden_states.device.type != "mps"
else "cpu"
)
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
router_logits = F.linear(hidden_states.float(), self.weight)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
_, selected_experts = torch.topk(self.moe_statics(routing_weights), self.top_k, dim=-1)
routing_weights = torch.gather(routing_weights, dim=-1, index=selected_experts)
routing_weights = routing_weights / torch.clamp(
routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min
)
routing_weights = routing_weights.to(hidden_states.dtype)
return router_logits, selected_experts, routing_weights
class Ernie4_5_MoeSparseMoeBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.hidden_dim = config.hidden_size
self.num_experts = config.moe_num_experts
self.top_k = config.moe_k
self.gate = Ernie4_5_MoeTopKRouter(config)
self.experts = Ernie4_5_MoeExperts(config)
self.shared_experts = None
if config.moe_num_shared_experts > 0:
self.shared_experts = Ernie4_5_MoeMLP(config, config.moe_intermediate_size * config.moe_num_shared_experts)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, sequence_length, _ = hidden_states.shape
hidden_states = hidden_states.view(-1, self.hidden_dim)
if self.shared_experts is not None:
shared_output = self.shared_experts(hidden_states)
_, top_k_index, top_k_weights = self.gate(hidden_states)
final_hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
if self.shared_experts is not None:
final_hidden_states = final_hidden_states + shared_output
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, self.hidden_dim)
return final_hidden_states.to(hidden_states.dtype)
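# Per-token output of this block: sum over the top-k routed experts of
# weight_e * expert_e(x), plus shared_experts(x) when shared experts are configured;
# the shared path is a dense MLP applied to every token, independent of the router.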
class Ernie4_5_MoeDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Ernie4_5_MoeAttention(config, layer_idx)
if (
((layer_idx + 1) % config.moe_layer_interval == 0)
and layer_idx >= config.moe_layer_start_index
and layer_idx <= config.moe_layer_end_index
):
self.mlp = Ernie4_5_MoeSparseMoeBlock(config)
else:
self.mlp = Ernie4_5_MoeMLP(config)
self.input_layernorm = Ernie4_5_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
self.post_attention_layernorm = Ernie4_5_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
class Ernie4_5_MoePreTrainedModel(PreTrainedModel):
config: Ernie4_5_MoeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Ernie4_5_MoeDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
_supports_attention_backend = True
_can_record_outputs = {
"router_logits": OutputRecorder(Ernie4_5_MoeTopKRouter, index=0),
"hidden_states": Ernie4_5_MoeDecoderLayer,
"attentions": Ernie4_5_MoeAttention,
}
# Not supporting multi-token prediction (MTP) atm
_keys_to_ignore_on_load_unexpected = ["mtp"]
_keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"]
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Ernie4_5_MoeStatics):
init.zeros_(module.e_score_correction_bias)
elif isinstance(module, Ernie4_5_MoeExperts):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
class Ernie4_5_MoeModel(Ernie4_5_MoePreTrainedModel):
def __init__(self, config: Ernie4_5_MoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Ernie4_5_MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Ernie4_5_MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Ernie4_5_MoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
return overall_loss * num_experts
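# Numeric sketch (illustrative, no attention mask, 2 experts, top_k = 1):
#   all tokens routed to expert 0 with prob 0.9 ->
#       tokens_per_expert = [1.0, 0.0], router_prob_per_expert = [0.9, 0.1]
#       loss = 2 * (1.0 * 0.9 + 0.0 * 0.1) = 1.8
#   perfectly balanced routing gives [0.5, 0.5] for both -> loss = 2 * 0.5 = 1.0,
#   the minimum, so the penalty grows as routing becomes more unbalanced.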
@auto_docstring
class Ernie4_5_MoeForCausalLM(Ernie4_5_MoePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Ernie4_5_MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=config.use_bias)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.moe_num_experts
self.num_experts_per_tok = config.moe_k
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_router_logits=output_router_logits,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py | src/transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py | # Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ernie 4.5 MoE model configuration"""
from typing import Optional
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class Ernie4_5_MoeConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of an [`Ernie4_5_MoeModel`]. It is used to instantiate an
Ernie 4.5 MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of [baidu/ERNIE-4.5-21B-A3B-PT](https://huggingface.co/baidu/ERNIE-4.5-21B-A3B-PT).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 103424):
Vocabulary size of the Ernie 4.5 MoE model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Ernie4_5_MoeModel`]
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 12288):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 28):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 20):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `4`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
use_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in any of the projections including mlp and attention for example.
moe_intermediate_size (`int`, *optional*, defaults to 1536):
Intermediate size of the routed expert.
moe_k (`int`, *optional*, defaults to 6):
Number of selected experts.
moe_num_experts (`int`, *optional*, defaults to 64):
Number of routed experts.
moe_num_shared_experts (`int`, *optional*, defaults to 2):
The number of experts that are shared for all MoE forwards.
moe_layer_start_index (`int`, *optional*, defaults to 1):
The first index at which MoE layers start to appear.
moe_layer_end_index (`int`, *optional*, defaults to -1):
The last possible index for a MoE layer.
moe_layer_interval (`int`, *optional*, defaults to 1):
The intervals between MoE layers to appear.
moe_norm_min (`float`, *optional*, defaults to 1e-12):
Minimum division value during routing normalization.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
```python
>>> from transformers import Ernie4_5_MoeModel, Ernie4_5_MoeConfig
>>> # Initializing an Ernie4_5_Moe style configuration
>>> configuration = Ernie4_5_MoeConfig()
>>> # Initializing a model from the ERNIE-4.5-21B-A3B style configuration
>>> model = Ernie4_5_MoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "ernie4_5_moe"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_experts": "moe_num_experts", "num_experts_per_tok": "moe_k"}
default_theta = 500000.0
# Default tensor parallel plan for base model `Ernie4_5_MoE`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.experts.gate_up_proj": "local_rowwise",
"layers.*.mlp.experts.down_proj": "local_rowwise",
"layers.*.mlp.experts": "gather",
"layers.*.mlp.shared_experts.gate_proj": "colwise",
"layers.*.mlp.shared_experts.up_proj": "colwise",
"layers.*.mlp.shared_experts.down_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 103424,
pad_token_id: Optional[int] = 0,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
hidden_size: Optional[int] = 2560,
intermediate_size: Optional[int] = 12288,
num_hidden_layers: Optional[int] = 28,
num_attention_heads: Optional[int] = 20,
num_key_value_heads: Optional[int] = 4,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 131072,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[float] = 1e-5,
use_cache: Optional[bool] = True,
tie_word_embeddings: Optional[bool] = True,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
use_bias: Optional[bool] = False,
moe_intermediate_size: Optional[int] = 1536,
moe_k: Optional[int] = 6,
moe_num_experts: Optional[int] = 64,
moe_num_shared_experts: Optional[int] = 2,
moe_layer_start_index: Optional[int] = 1,
moe_layer_end_index: Optional[int] = -1,
moe_layer_interval: Optional[int] = 1,
moe_norm_min: Optional[float] = 1e-12,
output_router_logits: Optional[bool] = False,
router_aux_loss_coef: Optional[float] = 0.001,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.use_bias = use_bias
# MoE arguments
self.moe_intermediate_size = moe_intermediate_size
self.moe_k = moe_k
self.moe_num_experts = moe_num_experts
self.moe_num_shared_experts = moe_num_shared_experts
self.moe_layer_start_index = moe_layer_start_index
self.moe_layer_end_index = self.num_hidden_layers - 1 if moe_layer_end_index == -1 else moe_layer_end_index
self.moe_layer_interval = moe_layer_interval
self.moe_norm_min = moe_norm_min
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["Ernie4_5_MoeConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/beit/configuration_beit.py | src/transformers/models/beit/configuration_beit.py | # coding=utf-8
# Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BEiT model configuration"""
import warnings
from ...configuration_utils import PreTrainedConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
class BeitConfig(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate a BEiT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the BEiT
[microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture.
Args:
vocab_size (`int`, *optional*, defaults to 8192):
Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during
pre-training.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
Whether to use BERT-style absolute position embeddings.
use_relative_position_bias (`bool`, *optional*, defaults to `False`):
Whether to use T5-style relative position embeddings in the self-attention layers.
use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
layer_scale_init_value (`float`, *optional*, defaults to 0.1):
Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate per sample (when applied in the main path of residual layers).
use_mean_pooling (`bool`, *optional*, defaults to `True`):
Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
CLS token, before applying the classification head.
pool_scales (`tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
Pooling scales used in Pooling Pyramid Module applied on the last feature map.
use_auxiliary_head (`bool`, *optional*, defaults to `True`):
Whether to use an auxiliary head during training.
auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
Weight of the cross-entropy loss of the auxiliary head.
auxiliary_channels (`int`, *optional*, defaults to 256):
Number of channels to use in the auxiliary head.
auxiliary_num_convs (`int`, *optional*, defaults to 1):
Number of convolutional layers to use in the auxiliary head.
auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
Whether to concatenate the output of the auxiliary head with the input before the classification layer.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
add_fpn (`bool`, *optional*, defaults to `False`):
            Whether to add an FPN as part of the backbone. Only relevant for [`BeitBackbone`].
reshape_hidden_states (`bool`, *optional*, defaults to `True`):
Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
seq_len, hidden_size)`. Only relevant for [`BeitBackbone`].
Example:
```python
>>> from transformers import BeitConfig, BeitModel
>>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration
>>> configuration = BeitConfig()
>>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration
>>> model = BeitModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
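    >>> # A further, hedged sketch: the same configuration also drives the backbone variant;
    >>> # stage names follow the `stage_names` attribute ("stem", "stage1", ..., "stage12" for this size)
    >>> backbone_configuration = BeitConfig(out_features=["stage3", "stage6", "stage9", "stage12"])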
```"""
model_type = "beit"
def __init__(
self,
vocab_size=8192,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-12,
image_size=224,
patch_size=16,
num_channels=3,
use_mask_token=False,
use_absolute_position_embeddings=False,
use_relative_position_bias=False,
use_shared_relative_position_bias=False,
layer_scale_init_value=0.1,
drop_path_rate=0.1,
use_mean_pooling=True,
pool_scales=[1, 2, 3, 6],
use_auxiliary_head=True,
auxiliary_loss_weight=0.4,
auxiliary_channels=256,
auxiliary_num_convs=1,
auxiliary_concat_input=False,
semantic_loss_ignore_index=255,
out_features=None,
out_indices=None,
add_fpn=False,
reshape_hidden_states=True,
**kwargs,
):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.use_mask_token = use_mask_token
self.use_absolute_position_embeddings = use_absolute_position_embeddings
self.use_relative_position_bias = use_relative_position_bias
self.use_shared_relative_position_bias = use_shared_relative_position_bias
self.layer_scale_init_value = layer_scale_init_value
self.drop_path_rate = drop_path_rate
self.use_mean_pooling = use_mean_pooling
# decode head attributes (semantic segmentation)
self.pool_scales = pool_scales
# auxiliary head attributes (semantic segmentation)
self.use_auxiliary_head = use_auxiliary_head
self.auxiliary_loss_weight = auxiliary_loss_weight
self.auxiliary_channels = auxiliary_channels
self.auxiliary_num_convs = auxiliary_num_convs
self.auxiliary_concat_input = auxiliary_concat_input
self.semantic_loss_ignore_index = semantic_loss_ignore_index
# handle backwards compatibility
if "segmentation_indices" in kwargs:
warnings.warn(
"The `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.",
FutureWarning,
)
out_indices = kwargs.pop("segmentation_indices")
# backbone attributes
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
self.add_fpn = add_fpn
self.reshape_hidden_states = reshape_hidden_states
__all__ = ["BeitConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/beit/image_processing_beit.py | src/transformers/models/beit/image_processing_beit.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Beit."""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import (
TensorType,
filter_out_non_signature_kwargs,
is_torch_available,
is_torch_tensor,
is_vision_available,
logging,
)
from ...utils.import_utils import requires
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BeitImageProcessorKwargs(ImagesKwargs, total=False):
r"""
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
        is used for background and the background class itself is not part of the dataset's label set (e.g.
        ADE20k). The background label will be replaced by 255.
"""
do_reduce_labels: bool
@requires(backends=("vision",))
class BeitImageProcessor(BaseImageProcessor):
r"""
Constructs a BEiT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
`preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            The mean to use if normalizing the image. This is a float or a list of floats, one per image channel.
            Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            The standard deviation to use if normalizing the image. This is a float or a list of floats, one per
            image channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
            used for background and the background class itself is not part of the dataset's label set (e.g. ADE20k).
            The background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
            `preprocess` method.
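    Example (a minimal usage sketch; the printed shape assumes the default `size` of 256 and `crop_size` of 224):
    ```python
    >>> import numpy as np
    >>> from PIL import Image
    >>> from transformers import BeitImageProcessor
    >>> image_processor = BeitImageProcessor()
    >>> image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
    >>> inputs = image_processor(images=image, return_tensors="pt")
    >>> list(inputs["pixel_values"].shape)
    [1, 3, 224, 224]
    ```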
"""
model_input_names = ["pixel_values"]
valid_kwargs = BeitImageProcessorKwargs
@filter_out_non_signature_kwargs(extra=INIT_SERVICE_KWARGS)
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
rescale_factor: Union[int, float] = 1 / 255,
do_rescale: bool = True,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_reduce_labels: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 256, "width": 256}
size = get_size_dict(size)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_reduce_labels = do_reduce_labels
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to (size["height"], size["width"]).
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PIL.Image.BICUBIC`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, default_to_square=True, param_name="size")
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` argument must contain `height` and `width` keys. Got {size.keys()}")
return resize(
image,
size=(size["height"], size["width"]),
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def reduce_label(self, label: ImageInput) -> np.ndarray:
label = to_numpy_array(label)
        # Avoid an underflow when subtracting 1: map the background label 0 to 255 first, then shift every label down
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
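    # Illustrative mapping (example values only): an input map [0, 1, 2, 255] becomes [255, 0, 1, 255],
    # i.e. the background label 0 is sent to the ignore index 255 and every other id is shifted down by one.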
def _preprocess(
self,
image: ImageInput,
do_reduce_labels: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
return image
def _preprocess_image(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single image."""
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(
image,
do_reduce_labels=False,
do_resize=do_resize,
size=size,
resample=resample,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
input_data_format=input_data_format,
)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_segmentation_map(
self,
segmentation_map: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_reduce_labels: Optional[bool] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""Preprocesses a single segmentation map."""
# All transformations expect numpy arrays.
segmentation_map = to_numpy_array(segmentation_map)
# Add an axis to the segmentation maps for transformations.
if segmentation_map.ndim == 2:
segmentation_map = segmentation_map[None, ...]
added_dimension = True
input_data_format = ChannelDimension.FIRST
else:
added_dimension = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(
image=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
resample=resample,
size=size,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_normalize=False,
do_rescale=False,
input_data_format=ChannelDimension.FIRST,
)
# Remove extra axis if added
if added_dimension:
segmentation_map = np.squeeze(segmentation_map, axis=0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
def __call__(self, images, segmentation_maps=None, **kwargs):
# Overrides the `__call__` method of the `Preprocessor` class such that the images and segmentation maps can both
# be passed in as positional arguments.
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_reduce_labels: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            segmentation_maps (`ImageInput`, *optional*):
Segmentation maps to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
                padded with zeros and then cropped.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values to the [0, 1] range.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
                Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
                is used for background and the background class itself is not part of the dataset's label set (e.g.
                ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=True, param_name="size")
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if segmentation_maps is not None and not valid_images(segmentation_maps):
raise ValueError(
"Invalid segmentation_maps type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor"
)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
images = [
self._preprocess_image(
image=img,
do_resize=do_resize,
do_center_crop=do_center_crop,
do_rescale=do_rescale,
do_normalize=do_normalize,
resample=resample,
size=size,
rescale_factor=rescale_factor,
crop_size=crop_size,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
input_data_format=input_data_format,
)
for img in images
]
data = {"pixel_values": images}
if segmentation_maps is not None:
segmentation_maps = [
self._preprocess_segmentation_map(
segmentation_map=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
resample=resample,
size=size,
do_center_crop=do_center_crop,
crop_size=crop_size,
)
for segmentation_map in segmentation_maps
]
data["labels"] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`BeitForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
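        Example (a hedged sketch; the checkpoint name is an assumption, any BEiT semantic segmentation checkpoint works):
        ```python
        >>> import numpy as np
        >>> import torch
        >>> from PIL import Image
        >>> from transformers import BeitForSemanticSegmentation, BeitImageProcessor
        >>> checkpoint = "microsoft/beit-base-finetuned-ade-640-640"
        >>> image_processor = BeitImageProcessor.from_pretrained(checkpoint)
        >>> model = BeitForSemanticSegmentation.from_pretrained(checkpoint)
        >>> image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> # resize predictions back to the original (height, width) of the image
        >>> maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])
        >>> maps[0].shape
        torch.Size([480, 640])
        ```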
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
__all__ = ["BeitImageProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/beit/convert_beit_unilm_to_pytorch.py | src/transformers/models/beit/convert_beit_unilm_to_pytorch.py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BEiT checkpoints from the unilm repository."""
import argparse
import json
from pathlib import Path
import requests
import torch
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BeitConfig,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitImageProcessor,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
prefix = "backbone." if is_semantic else ""
rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append(
(f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
)
rename_keys.append(
(f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
)
rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))
# projection layer + position embeddings
rename_keys.extend(
[
(f"{prefix}cls_token", "beit.embeddings.cls_token"),
(f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
]
)
if has_lm_head:
# mask token + shared relative position bias + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
(
"rel_pos_bias.relative_position_bias_table",
"beit.encoder.relative_position_bias.relative_position_bias_table",
),
(
"rel_pos_bias.relative_position_index",
"beit.encoder.relative_position_bias.relative_position_index",
),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
]
)
elif is_semantic:
# semantic segmentation classification heads
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
]
)
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
]
)
return rename_keys
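# For illustration (example pairs only, not the full list): with the default, non-semantic prefix,
# layer 0 contributes renames such as
#   ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")
#   ("blocks.0.attn.proj.weight", "beit.encoder.layer.0.attention.output.dense.weight")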
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
for i in range(config.num_hidden_layers):
prefix = "backbone." if is_semantic else ""
# queries, keys and values
in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")
state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
: config.hidden_size, :
]
state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
# relative_position bias table + index
if not has_lm_head:
# each layer has its own relative position bias
table = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_bias_table")
index = state_dict.pop(f"{prefix}blocks.{i}.attn.relative_position_index")
state_dict[
f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_bias_table"
] = table
state_dict[
f"beit.encoder.layer.{i}.attention.attention.relative_position_bias.relative_position_index"
] = index
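# Note: the fused `qkv.weight` is split into three equal `hidden_size`-sized chunks (query / key / value).
# The original checkpoints only store `q_bias` and `v_bias`; this mirrors `BeitSelfAttention`, whose key
# projection is created without a bias.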
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path):
"""
Copy/paste/tweak model's weights to our BEiT structure.
"""
# define default BEiT configuration
config = BeitConfig()
has_lm_head = False
is_semantic = False
repo_id = "huggingface/label-files"
# set config parameters based on URL
if checkpoint_url[-9:-4] == "pt22k":
# masked image modeling
config.use_shared_relative_position_bias = True
config.use_mask_token = True
has_lm_head = True
elif checkpoint_url[-9:-4] == "ft22k":
# intermediate fine-tuning on ImageNet-22k
config.use_relative_position_bias = True
config.num_labels = 21841
filename = "imagenet-22k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
# this dataset contains 21843 labels but the model only has 21841
# we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18
del id2label[9205]
del id2label[15027]
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
elif checkpoint_url[-8:-4] == "to1k":
# fine-tuning on ImageNet-1k
config.use_relative_position_bias = True
config.num_labels = 1000
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
if "384" in checkpoint_url:
config.image_size = 384
if "512" in checkpoint_url:
config.image_size = 512
elif "ade20k" in checkpoint_url:
        # fine-tuning on ADE20k (semantic segmentation)
config.use_relative_position_bias = True
config.num_labels = 150
filename = "ade20k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
config.image_size = 640
is_semantic = True
else:
raise ValueError("Checkpoint not supported, URL should either end with 'pt22k', 'ft22k', 'to1k' or 'ade20k'")
# size of the architecture
if "base" in checkpoint_url:
pass
elif "large" in checkpoint_url:
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
if "ade20k" in checkpoint_url:
config.image_size = 640
config.out_indices = [7, 11, 15, 23]
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
# load state_dict of original model, remove and rename some keys
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)
state_dict = state_dict["model"] if "ade20k" not in checkpoint_url else state_dict["state_dict"]
rename_keys = create_rename_keys(config, has_lm_head=has_lm_head, is_semantic=is_semantic)
for src, dest in rename_keys:
rename_key(state_dict, src, dest)
read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head, is_semantic=is_semantic)
if is_semantic:
# add prefix to decoder keys
for key, val in state_dict.copy().items():
val = state_dict.pop(key)
if key.startswith("backbone.fpn"):
key = key.replace("backbone.fpn", "fpn")
state_dict[key] = val
# load HuggingFace model
if checkpoint_url[-9:-4] == "pt22k":
model = BeitForMaskedImageModeling(config)
elif "ade20k" in checkpoint_url:
model = BeitForSemanticSegmentation(config)
else:
model = BeitForImageClassification(config)
model.eval()
model.load_state_dict(state_dict)
# Check outputs on an image
if is_semantic:
image_processor = BeitImageProcessor(size=config.image_size, do_center_crop=False)
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
image = Image.open(ds[0]["file"])
else:
image_processor = BeitImageProcessor(
size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
)
image = prepare_img()
encoding = image_processor(images=image, return_tensors="pt")
pixel_values = encoding["pixel_values"]
outputs = model(pixel_values)
logits = outputs.logits
# verify logits
expected_shape = torch.Size([1, 1000])
if checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k"):
expected_shape = torch.Size([1, 196, 8192])
elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k"):
expected_shape = torch.Size([1, 196, 8192])
elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22k"):
expected_shape = torch.Size([1, 21841])
expected_logits = torch.tensor([2.2288, 2.4671, 0.7395])
expected_class_idx = 2397
elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22k"):
expected_shape = torch.Size([1, 21841])
expected_logits = torch.tensor([1.6881, -0.2787, 0.5901])
expected_class_idx = 2396
elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft1k"):
expected_logits = torch.tensor([0.1241, 0.0798, -0.6569])
expected_class_idx = 285
elif checkpoint_url[:-4].endswith("beit_base_patch16_224_pt22k_ft22kto1k"):
expected_logits = torch.tensor([-1.2385, -1.0987, -1.0108])
expected_class_idx = 281
elif checkpoint_url[:-4].endswith("beit_base_patch16_384_pt22k_ft22kto1k"):
expected_logits = torch.tensor([-1.5303, -0.9484, -0.3147])
expected_class_idx = 761
elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft1k"):
expected_logits = torch.tensor([0.4610, -0.0928, 0.2086])
expected_class_idx = 761
elif checkpoint_url[:-4].endswith("beit_large_patch16_224_pt22k_ft22kto1k"):
expected_logits = torch.tensor([-0.4804, 0.6257, -0.1837])
expected_class_idx = 761
elif checkpoint_url[:-4].endswith("beit_large_patch16_384_pt22k_ft22kto1k"):
expected_logits = torch.tensor([[-0.5122, 0.5117, -0.2113]])
expected_class_idx = 761
elif checkpoint_url[:-4].endswith("beit_large_patch16_512_pt22k_ft22kto1k"):
expected_logits = torch.tensor([-0.3062, 0.7261, 0.4852])
expected_class_idx = 761
elif checkpoint_url[:-4].endswith("beit_base_patch16_640_pt22k_ft22ktoade20k"):
expected_shape = (1, 150, 160, 160)
expected_logits = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
]
)
elif checkpoint_url[:-4].endswith("beit_large_patch16_640_pt22k_ft22ktoade20k"):
expected_shape = (1, 150, 160, 160)
expected_logits = torch.tensor(
[
[[-4.3305, -2.3049, -3.0161], [-2.9591, -1.5305, -2.2251], [-3.4198, -1.8004, -2.9062]],
[[-5.8922, -3.7435, -4.3978], [-4.2063, -2.7872, -3.4755], [-4.2791, -3.1874, -4.1681]],
[[0.9895, 4.3467, 4.7663], [4.2476, 5.6830, 6.1518], [4.5550, 6.2495, 6.5154]],
]
)
else:
raise ValueError("Can't verify logits as model is not supported")
if logits.shape != expected_shape:
raise ValueError(f"Shape of logits not as expected. {logits.shape=}, {expected_shape=}")
if not has_lm_head:
if is_semantic:
if not torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-3):
raise ValueError("First elements of logits not as expected")
else:
print("Predicted class idx:", logits.argmax(-1).item())
if not torch.allclose(logits[0, :3], expected_logits, atol=1e-3):
raise ValueError("First elements of logits not as expected")
if logits.argmax(-1).item() != expected_class_idx:
raise ValueError("Predicted class index not as expected")
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
args = parser.parse_args()
convert_beit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
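# Example invocation (the output folder name is illustrative; the URL is the script's default):
#   python convert_beit_unilm_to_pytorch.py \
#       --checkpoint_url https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth \
#       --pytorch_dump_folder_path ./beit-base-patch16-224-pt22k-ft22kto1k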
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/beit/__init__.py | src/transformers/models/beit/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_beit import *
from .feature_extraction_beit import *
from .image_processing_beit import *
from .image_processing_beit_fast import *
from .modeling_beit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/beit/modeling_beit.py | src/transformers/models/beit/modeling_beit.py | # coding=utf-8
# Copyright 2021 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BEiT model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutput,
BaseModelOutputWithPooling,
ImageClassifierOutput,
MaskedLMOutput,
SemanticSegmenterOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import compile_compatible_method_lru_cache
from ...utils import auto_docstring, logging, torch_int
from ...utils.backbone_utils import BackboneMixin
from .configuration_beit import BeitConfig
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
Class for outputs of [`BeitModel`].
"""
)
class BeitModelOutputWithPooling(BaseModelOutputWithPooling):
r"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
will be returned.
"""
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
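# Illustrative behaviour (not part of the original code): with drop_prob=0.1 during training, each sample's
# residual branch is zeroed with probability 0.1 and the surviving samples are scaled by 1 / 0.9, so the
# branch keeps its expected value; at evaluation time the input is returned unchanged.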
class BeitDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
# Based on timm implementation, which can be found here:
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
class BeitEmbeddings(nn.Module):
"""
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: BeitConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
if config.use_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
else:
self.mask_token = None
self.patch_embeddings = BeitPatchEmbeddings(config)
self.patch_size = config.patch_size
self.image_size = (
config.image_size
if isinstance(config.image_size, collections.abc.Iterable)
else (config.image_size, config.image_size)
)
num_patches = self.patch_embeddings.num_patches
if config.use_absolute_position_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
else:
self.position_embeddings = None
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# Copied from transformers.models.vit.modeling_vit.ViTEmbeddings.interpolate_pos_encoding
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
        This method interpolates the pre-trained position encodings so the model can be used on higher-resolution
        images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
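    # Worked example (assuming the default configuration): a checkpoint pre-trained at 224x224 with
    # patch_size=16 stores a 14x14 grid of patch position embeddings; running on a 384x384 image
    # interpolates that grid bicubically to a 24x24 grid before it is added to the patch embeddings.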
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
) -> torch.Tensor:
_, _, height, width = pixel_values.shape
embeddings, (patch_height, patch_width) = self.patch_embeddings(pixel_values)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1 - w) + mask_tokens * w
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
if self.position_embeddings is not None:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
embeddings = self.dropout(embeddings)
return embeddings, (patch_height, patch_width)
class BeitPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.patch_shape = patch_shape
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embeddings = self.projection(pixel_values.to(self.projection.weight.dtype))
patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]
embeddings = embeddings.flatten(2).transpose(1, 2)
return embeddings, (patch_height, patch_width)
class BeitSelfAttention(nn.Module):
def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.has_relative_position_bias = bool(window_size)
if self.has_relative_position_bias:
self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
relative_position_bias: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int]] = None,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Add relative position bias if present.
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
attention_scores = attention_scores + self.relative_position_bias(
window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
)
# Add shared relative position bias if provided.
if relative_position_bias is not None:
attention_scores = attention_scores + relative_position_bias
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class BeitSdpaSelfAttention(BeitSelfAttention):
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
relative_position_bias: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int]] = None,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
if output_attentions:
logger.warning_once(
f"{self.__class__.__name__} does not support `output_attentions=True`. The returned attention weights will "
"be `None`. If you want to get attention weights, please set `attn_implementation='eager'` when loading the model."
)
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
attn_bias = None
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
attn_bias = self.relative_position_bias(
window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
)
# Add shared relative position bias if provided.
if relative_position_bias is not None:
if attn_bias is None:
attn_bias = relative_position_bias
else:
attn_bias += relative_position_bias
scaling = 1 / math.sqrt(self.attention_head_size)
context_layer = torch.nn.functional.scaled_dot_product_attention(
query_layer,
key_layer,
value_layer,
attn_mask=attn_bias,
dropout_p=self.config.attention_probs_dropout_prob if self.training else 0.0,
is_causal=False,
scale=scaling,
)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, None
class BeitSelfOutput(nn.Module):
"""
The residual connection is defined in BeitLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: BeitConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
BEIT_SELF_ATTENTION_CLASSES = {
"eager": BeitSelfAttention,
"sdpa": BeitSdpaSelfAttention,
}
class BeitAttention(nn.Module):
def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
super().__init__()
self.attention = BEIT_SELF_ATTENTION_CLASSES[config._attn_implementation](config, window_size=window_size)
self.output = BeitSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
relative_position_bias: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int]] = None,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
self_outputs = self.attention(
hidden_states, output_attentions, relative_position_bias, interpolate_pos_encoding, resolution
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BeitIntermediate(nn.Module):
def __init__(self, config: BeitConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BeitOutput(nn.Module):
def __init__(self, config: BeitConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class BeitLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None, drop_path_rate: float = 0.0) -> None:
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BeitAttention(config, window_size=window_size)
self.intermediate = BeitIntermediate(config)
self.output = BeitOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.drop_path = BeitDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
init_values = config.layer_scale_init_value
if init_values > 0:
self.lambda_1 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
self.lambda_2 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
else:
self.lambda_1, self.lambda_2 = None, None
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
relative_position_bias: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int, int]] = None,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
self_attention_outputs = self.attention(
self.layernorm_before(hidden_states), # in BEiT, layernorm is applied before self-attention
output_attentions=output_attentions,
relative_position_bias=relative_position_bias,
interpolate_pos_encoding=interpolate_pos_encoding,
resolution=resolution,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# apply lambda_1 if present
if self.lambda_1 is not None:
attention_output = self.lambda_1 * attention_output
# first residual connection
hidden_states = self.drop_path(attention_output) + hidden_states
# in BEiT, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output)
if self.lambda_2 is not None:
layer_output = self.lambda_2 * layer_output
# second residual connection
layer_output = self.drop_path(layer_output) + hidden_states
outputs = (layer_output,) + outputs
return outputs
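# A toy functional sketch (not used by the model) of the pre-norm + LayerScale residual structure above:
#     x = x + DropPath(lambda_1 * Attention(LayerNorm(x)))
#     x = x + DropPath(lambda_2 * MLP(LayerNorm(x)))
# with the drop path omitted and every sub-module replaced by a stand-in callable.
def _toy_beit_block(x, attention, mlp, norm1, norm2, lambda_1=None, lambda_2=None):
    hidden = attention(norm1(x))
    if lambda_1 is not None:
        hidden = lambda_1 * hidden
    x = x + hidden
    hidden = mlp(norm2(x))
    if lambda_2 is not None:
        hidden = lambda_2 * hidden
    return x + hidden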
class BeitRelativePositionBias(nn.Module):
def __init__(self, config: BeitConfig, window_size: tuple) -> None:
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, config.num_attention_heads)
) # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token to cls & cls to cls
@compile_compatible_method_lru_cache(maxsize=10)
def generate_relative_position_index(self, window_size: tuple[int, int]) -> torch.Tensor:
"""
This method creates the relative position index, modified to support arbitrary window sizes,
as introduced in [MiDaS v3.1](https://huggingface.co/papers/2307.14460).
"""
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
        # cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
window_area = window_size[0] * window_size[1]
grid = torch.meshgrid(torch.arange(window_size[0]), torch.arange(window_size[1]), indexing="ij")
coords = torch.stack(grid) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = num_relative_distance - 3
relative_position_index[0:, 0] = num_relative_distance - 2
relative_position_index[0, 0] = num_relative_distance - 1
return relative_position_index
def forward(self, window_size, interpolate_pos_encoding: bool = False, dim_size=None) -> torch.Tensor:
"""
Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
"""
old_height = 2 * self.window_size[0] - 1
old_width = 2 * self.window_size[1] - 1
new_height = 2 * window_size[0] - 1
new_width = 2 * window_size[1] - 1
old_relative_position_bias_table = self.relative_position_bias_table
old_num_relative_distance = self.num_relative_distance
new_num_relative_distance = new_height * new_width + 3
old_sub_table = old_relative_position_bias_table[: old_num_relative_distance - 3]
old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
new_sub_table = nn.functional.interpolate(
old_sub_table, size=(torch_int(new_height), torch_int(new_width)), mode="bilinear"
)
new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)
new_relative_position_bias_table = torch.cat(
[new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3 :]]
)
relative_position_index = self.generate_relative_position_index(window_size)
relative_position_bias = new_relative_position_bias_table[relative_position_index.view(-1)]
        # shape (seq_len, seq_len, num_attention_heads), where seq_len = num_patches + 1
relative_position_bias = relative_position_bias.view(
window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1
)
        # shape (num_attention_heads, seq_len, seq_len)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
if interpolate_pos_encoding:
relative_position_bias = nn.functional.interpolate(
relative_position_bias.unsqueeze(1),
size=(dim_size, dim_size),
mode="bilinear",
align_corners=False,
).squeeze(1)
return relative_position_bias.unsqueeze(0)
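# A small self-contained sketch (toy 2x2 window) of the index arithmetic used in
# `generate_relative_position_index` above: the token-to-token block takes values in
# [0, (2*Wh - 1) * (2*Ww - 1)), while the last three rows of the bias table are reserved for the
# cls-to-token, token-to-cls and cls-to-cls positions.
def _toy_relative_position_index(window_size=(2, 2)):
    coords = torch.stack(
        torch.meshgrid(torch.arange(window_size[0]), torch.arange(window_size[1]), indexing="ij")
    ).flatten(1)  # 2, Wh*Ww
    relative = (coords[:, :, None] - coords[:, None, :]).permute(1, 2, 0).contiguous()
    relative[:, :, 0] += window_size[0] - 1  # shift both axes to start from 0
    relative[:, :, 1] += window_size[1] - 1
    relative[:, :, 0] *= 2 * window_size[1] - 1
    return relative.sum(-1)  # (Wh*Ww, Wh*Ww); values span 0..8 for a 2x2 window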
class BeitEncoder(nn.Module):
def __init__(self, config: BeitConfig, window_size: Optional[tuple] = None) -> None:
super().__init__()
self.config = config
self.has_relative_position_bias = config.use_shared_relative_position_bias
if self.has_relative_position_bias:
self.relative_position_bias = BeitRelativePositionBias(config, window_size=window_size)
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers, device="cpu")]
self.layer = nn.ModuleList(
[
BeitLayer(
config,
window_size=window_size if config.use_relative_position_bias else None,
drop_path_rate=dpr[i],
)
for i in range(config.num_hidden_layers)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
output_hidden_states: bool = False,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int, int]] = None,
return_dict: bool = True,
) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
relative_position_bias = self.relative_position_bias(
window_size, interpolate_pos_encoding=interpolate_pos_encoding, dim_size=hidden_states.shape[1]
)
else:
relative_position_bias = None
layer_outputs = layer_module(
hidden_states,
output_attentions=output_attentions,
relative_position_bias=relative_position_bias,
interpolate_pos_encoding=interpolate_pos_encoding,
resolution=resolution,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
class BeitPreTrainedModel(PreTrainedModel):
config: BeitConfig
base_model_prefix = "beit"
input_modalities = ("image",)
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["BeitLayer"]
_keys_to_ignore_on_load_unexpected = [r".*relative_position_index.*"]
_supports_sdpa = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, BeitEmbeddings):
init.zeros_(module.cls_token)
if module.mask_token is not None:
init.zeros_(module.mask_token)
if module.position_embeddings is not None:
init.zeros_(module.position_embeddings)
elif isinstance(module, BeitRelativePositionBias):
init.zeros_(module.relative_position_bias_table)
elif isinstance(module, BeitLayer):
if module.lambda_1 is not None:
init.constant_(module.lambda_1, self.config.layer_scale_init_value)
init.constant_(module.lambda_2, self.config.layer_scale_init_value)
@auto_docstring
class BeitModel(BeitPreTrainedModel):
def __init__(self, config: BeitConfig, add_pooling_layer: bool = True) -> None:
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = BeitEmbeddings(config)
self.encoder = BeitEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
self.layernorm = (
nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
)
self.pooler = BeitPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, BeitModelOutputWithPooling]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
resolution = pixel_values.shape[2:]
encoder_outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
resolution=resolution,
return_dict=return_dict,
interpolate_pos_encoding=interpolate_pos_encoding,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
return head_outputs + encoder_outputs[1:]
return BeitModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
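# A minimal usage sketch for the model defined above. The checkpoint name below is an assumption for
# illustration; any BEiT checkpoint on the Hub can be substituted.
def _beit_model_usage_example():
    import requests
    from PIL import Image

    from transformers import AutoImageProcessor, BeitModel

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = BeitModel.from_pretrained("microsoft/beit-base-patch16-224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # (batch_size, 1 + num_patches, hidden_size); the first token is the [CLS] token
    return outputs.last_hidden_state.shape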
class BeitPooler(nn.Module):
def __init__(self, config: BeitConfig) -> None:
super().__init__()
self.layernorm = (
nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if config.use_mean_pooling else None
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/beit/image_processing_beit_fast.py | src/transformers/models/beit/image_processing_beit_fast.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for Beit."""
from typing import Optional, Union
import torch
from torchvision.transforms.v2 import functional as F
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
is_torch_tensor,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_beit import BeitImageProcessorKwargs
@auto_docstring
class BeitImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
default_to_square = True
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = False
do_rescale = True
do_normalize = True
do_reduce_labels = False
valid_kwargs = BeitImageProcessorKwargs
def __init__(self, **kwargs: Unpack[BeitImageProcessorKwargs]):
super().__init__(**kwargs)
def reduce_label(self, labels: list["torch.Tensor"]):
for idx in range(len(labels)):
label = labels[idx]
label = torch.where(label == 0, torch.tensor(255, dtype=label.dtype), label)
label = label - 1
label = torch.where(label == 254, torch.tensor(255, dtype=label.dtype), label)
labels[idx] = label
        return labels
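    # A concrete sketch of the reduction above (illustrative values):
    #     >>> labels = [torch.tensor([[0, 1, 2], [255, 3, 0]])]
    #     >>> BeitImageProcessorFast().reduce_label(labels)
    #     [tensor([[255,   0,   1], [255,   2, 255]])]
    # i.e. the background id 0 becomes the ignore index 255 and every other class id is shifted down by
    # one, matching datasets such as ADE20k whose class ids start at 1.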
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[BeitImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[BeitImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
images_kwargs = kwargs.copy()
images_kwargs["do_reduce_labels"] = False
batch_feature = self._preprocess(images, **images_kwargs)
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update({"do_normalize": False, "do_rescale": False})
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
).pixel_values
batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return batch_feature
def _preprocess(
self,
images: list["torch.Tensor"],
do_reduce_labels: bool,
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
if do_reduce_labels:
images = self.reduce_label(images)
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(stacked_images, crop_size)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`BeitForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
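# A minimal usage sketch for the post-processing above. The checkpoint name is an assumption for
# illustration; any `BeitForSemanticSegmentation` checkpoint can be used instead.
def _semantic_segmentation_example():
    from PIL import Image

    from transformers import BeitForSemanticSegmentation

    checkpoint = "microsoft/beit-base-finetuned-ade-640-640"
    processor = BeitImageProcessorFast.from_pretrained(checkpoint)
    model = BeitForSemanticSegmentation.from_pretrained(checkpoint)
    image = Image.new("RGB", (640, 480))  # placeholder image; replace with a real one
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Resize the logits back to the original (height, width) and take the per-pixel argmax
    segmentation_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    return segmentation_map  # (height, width) tensor of class ids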
__all__ = ["BeitImageProcessorFast"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mlcd/convert_mlcd_weights_to_hf.py | src/transformers/models/mlcd/convert_mlcd_weights_to_hf.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert MLCD checkpoints from the original repository.
URL: https://github.com/deepglint/unicom/tree/main
"""
import argparse
import collections
import os
import re
import numpy as np
import requests
import torch
from PIL import Image
from transformers import CLIPImageProcessor
from ...utils import logging
from .configuration_mlcd import MLCDVisionConfig
from .modeling_mlcd import MLCDVisionModel
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
COMMON_CONFIG_PARAMS = {
"mlcd-vit-bigG-patch14-336": {
"hidden_size": 1664,
"image_size": 336,
"intermediate_size": 8192,
"num_attention_heads": 16,
"num_hidden_layers": 48,
"patch_size": 14,
"projection_dim": 1024,
},
"mlcd-vit-bigG-patch14-448": {
"hidden_size": 1664,
"image_size": 448,
"intermediate_size": 8192,
"num_attention_heads": 16,
"num_hidden_layers": 48,
"patch_size": 14,
"projection_dim": 1024,
},
}
MODEL_NAME_TO_CHECKPOINT_PATH = {
# base checkpoints
"mlcd-vit-bigG-patch14-336": "MLCD_ViT_bigG_14_336px_pytorch.pt",
"mlcd-vit-bigG-patch14-448": "MLCD_ViT_bigG_14_448px_pytorch.pt",
}
# fmt: off
EXPECTED_OUTPUTS = {
"mlcd-vit-bigG-patch14-336": torch.tensor([
[-0.8921, -0.1069, 0.2989, 0.6018, -0.5892],
[ 0.4093, -1.4592, 0.6048, -0.5147, -0.5929],
[ 0.7796, -0.7133, -0.5649, -0.7843, -0.5548],
[ 0.0041, 0.0286, 0.4310, -0.1403, -0.2399],
[ 0.0839, 0.2152, -0.3822, -0.1668, -0.7886]
]),
"mlcd-vit-bigG-patch14-448": torch.tensor([
[-0.8978, -0.1181, 0.4769, 0.4761, -0.5779],
[ 0.2640, -2.6150, 0.4853, 0.5743, -1.1003],
[ 0.3314, -0.3328, -0.4305, -0.1874, -0.7701],
[-1.5174, -1.0238, -1.1854, 0.1749, -0.8786],
[ 0.2323, -0.8346, -0.9680, -0.2951, 0.0867],
]),
}
# fmt: on
# fmt: off
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
# Vision embeddings
r"conv1.weight": r"vision_model.embeddings.patch_embedding.weight",
r"class_embedding": r"vision_model.embeddings.class_embedding",
r"vision_rotary_embedding": r"vision_model.vision_rotary_embedding",
r"class_pos_emb": r"vision_model.class_pos_emb",
# Vision encoder
r"transformer.resblocks_(\d+).ln_1.weight": r"vision_model.encoder.layers.\1.layer_norm1.weight",
r"transformer.resblocks_(\d+).ln_1.bias": r"vision_model.encoder.layers.\1.layer_norm1.bias",
r"transformer.resblocks_(\d+).ln_2.weight": r"vision_model.encoder.layers.\1.layer_norm2.weight",
r"transformer.resblocks_(\d+).ln_2.bias": r"vision_model.encoder.layers.\1.layer_norm2.bias",
r"transformer.resblocks_(\d+).mlp.c_fc.weight": r"vision_model.encoder.layers.\1.mlp.fc1.weight",
r"transformer.resblocks_(\d+).mlp.c_fc.bias": r"vision_model.encoder.layers.\1.mlp.fc1.bias",
r"transformer.resblocks_(\d+).mlp.c_proj.weight": r"vision_model.encoder.layers.\1.mlp.fc2.weight",
r"transformer.resblocks_(\d+).mlp.c_proj.bias": r"vision_model.encoder.layers.\1.mlp.fc2.bias",
r"transformer.resblocks_(\d+).attn.(q|k|v|out)_proj.weight": r"vision_model.encoder.layers.\1.self_attn.\2_proj.weight",
r"transformer.resblocks_(\d+).attn.(q|k|v|out)_proj.bias": r"vision_model.encoder.layers.\1.self_attn.\2_proj.bias",
# Vision norm
r"ln_post.weight": r"vision_model.post_layernorm.weight",
r"ln_post.bias": r"vision_model.post_layernorm.bias",
r"ln_pre.weight": r"vision_model.pre_layernorm.weight",
r"ln_pre.bias": r"vision_model.pre_layernorm.bias",
}
# fmt: on
# --------------------------------------------------------------------------------------------
# Model objects: configuration, image processor
# --------------------------------------------------------------------------------------------
def get_mlcd_config(model_name: str) -> MLCDVisionConfig:
"""
Create a configuration for the MLCD model based on the model name.
"""
assert model_name in COMMON_CONFIG_PARAMS, f"Model {model_name} not found in the list of COMMON_CONFIG_PARAMS."
config_params = COMMON_CONFIG_PARAMS[model_name]
config = MLCDVisionConfig(
hidden_size=config_params["hidden_size"],
image_size=config_params["image_size"],
intermediate_size=config_params["intermediate_size"],
num_attention_heads=config_params["num_attention_heads"],
num_hidden_layers=config_params["num_hidden_layers"],
patch_size=config_params["patch_size"],
projection_dim=config_params["projection_dim"],
)
return config
def get_mlcd_image_processor(model_name: str) -> CLIPImageProcessor:
"""
Create an image processor for the MLCD model based on the model name.
"""
assert model_name in COMMON_CONFIG_PARAMS, f"Model {model_name} not found in the list of COMMON_CONFIG_PARAMS."
config_params = COMMON_CONFIG_PARAMS[model_name]
image_processor = CLIPImageProcessor(
do_center_crop=True,
do_normalize=True,
do_resize=True,
feature_extractor_type="CLIPFeatureExtractor",
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
resample=3,
size=config_params["image_size"],
crop_size=config_params["image_size"],
)
return image_processor
# --------------------------------------------------------------------------------------------
# Helper functions for state dict conversion
# --------------------------------------------------------------------------------------------
def flatten_nested_dict(params: dict, parent_key: str = "", sep: str = ".") -> dict:
"""
Flatten a nested original checkpoint dictionary into a flat dictionary.
"""
items = []
for k, v in params.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_nested_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
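# A tiny standalone check of the flattening above (made-up values):
#     {"a": {"b": 1, "c": {"d": 2}}, "e": 3}  ->  {"a.b": 1, "a.c.d": 2, "e": 3}
assert flatten_nested_dict({"a": {"b": 1, "c": {"d": 2}}, "e": 3}) == {"a.b": 1, "a.c.d": 2, "e": 3}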
def split_resblocks_layers(state_dict: dict) -> dict:
"""
Split the resblocks weight into layers. In some cases they are concatenated in
the original checkpoints.
"""
# Make shallow copy
state_dict = state_dict.copy()
# Split resblocks weight into layers
keys = list(state_dict.keys())
for key in keys:
if ".resblocks." in key:
weight = state_dict.pop(key)
for i, weight_i in enumerate(weight):
new_name = key.replace("resblocks", f"resblocks_{i}")
state_dict[new_name] = weight_i
return state_dict
def chunk_qkv_for_attn(state_dict: dict) -> dict:
"""
Chunk the q/k/v weights and biases for the attention layers.
"""
# Make shallow copy
state_dict = state_dict.copy()
# Read and process q/k/v weights and biases
keys = list(state_dict.keys())
for key in keys:
if ".in_proj." in key:
weight = state_dict.pop(key)
qkv_weights = weight.chunk(3, dim=0)
for name, weight_i in zip(["q_proj", "k_proj", "v_proj"], qkv_weights):
new_name = key.replace("in_proj", name)
state_dict[new_name] = weight_i
return state_dict
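# A small standalone sketch of the q/k/v chunking above (sizes are made up): a fused `in_proj` weight of
# shape (3 * hidden, hidden) is split row-wise into three (hidden, hidden) projection weights.
_qkv_demo = chunk_qkv_for_attn({"transformer.resblocks_0.attn.in_proj.weight": torch.zeros(6, 2)})
assert set(_qkv_demo) == {
    "transformer.resblocks_0.attn.q_proj.weight",
    "transformer.resblocks_0.attn.k_proj.weight",
    "transformer.resblocks_0.attn.v_proj.weight",
}
assert all(tensor.shape == (2, 2) for tensor in _qkv_demo.values())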
def convert_old_keys_to_new_keys(state_dict_keys: list) -> dict:
"""
This function should be applied only once, on the concatenated keys to efficiently rename using
the key mappings.
"""
output_dict = {}
if state_dict_keys is not None:
old_text = "\n".join(state_dict_keys)
new_text = old_text
for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
if replacement is None:
new_text = re.sub(pattern, "", new_text) # an empty line
continue
new_text = re.sub(pattern, replacement, new_text)
output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
return output_dict
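# A tiny standalone sketch of the renaming above (single key for illustration): the original keys are
# joined with newlines, every regex in ORIGINAL_TO_CONVERTED_KEY_MAPPING is applied once to the whole
# block, and the result is zipped back into an old -> new mapping.
assert convert_old_keys_to_new_keys(["ln_post.weight"]) == {"ln_post.weight": "vision_model.post_layernorm.weight"}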
# --------------------------------------------------------------------------------------------
# Convert model
# --------------------------------------------------------------------------------------------
@torch.no_grad()
def convert_mlcd_checkpoint(model_name, input_dir, output_dir, verify_hidden_state=True, push_to_hub=False):
"""
Copy/paste/tweak model's weights to our MLCD structure.
"""
# Define MLCD configuration
config = get_mlcd_config(model_name)
checkpoint = MODEL_NAME_TO_CHECKPOINT_PATH[model_name]
checkpoint_path = os.path.join(input_dir, checkpoint)
assert os.path.exists(checkpoint_path), f"Checkpoint path ({checkpoint_path}) not found."
# Load original checkpoint
print(f"Loading checkpoint from {checkpoint_path}...")
state_dict = torch.load(checkpoint_path, "cpu")
# Flatten nested dictionary
print("Flattening nested dictionary...")
state_dict = {k.replace("_orig_mod.", ""): v for k, v in state_dict.items()}
if "positional_embedding" in state_dict:
state_dict.pop("positional_embedding")
state_dict = flatten_nested_dict(state_dict)
state_dict = split_resblocks_layers(state_dict)
state_dict = chunk_qkv_for_attn(state_dict)
# Rename and transform weights
print("Renaming and transforming weights...")
original_keys = list(state_dict.keys())
hf_keys = convert_old_keys_to_new_keys(original_keys)
new_state_dict = {}
for original_key in original_keys:
new_key = hf_keys[original_key]
parameter = state_dict.pop(original_key)
new_state_dict[new_key] = torch.from_numpy(parameter)
# load HuggingFace model
print("Loading HuggingFace model...")
model = MLCDVisionModel(config).eval()
model.load_state_dict(new_state_dict)
# Create processor
print("Creating processor...")
image_processor = get_mlcd_image_processor(model_name)
# Verify hidden state
if verify_hidden_state:
print("Verifying hidden state for {model_name}...")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
pixel_values = image_processor(image, return_tensors="pt")["pixel_values"]
last_hidden_state = model(pixel_values, output_hidden_states=True).last_hidden_state[0, :5, :5]
expected_hidden_state = EXPECTED_OUTPUTS[model_name]
np.testing.assert_allclose(last_hidden_state.cpu().numpy(), expected_hidden_state.numpy(), atol=1e-4)
# Save model
if output_dir is not None:
dst_dir = os.path.join(output_dir, model_name)
print(f"Saving model {model_name} to {dst_dir}...")
model.save_pretrained(dst_dir)
print(f"Saving processor to {dst_dir}...")
image_processor.save_pretrained(dst_dir)
if push_to_hub:
print(f"Pushing model and processor for {model_name} to the HuggingFace Hub...")
model.push_to_hub(f"deepglint-hf/{model_name}", private=True)
image_processor.push_to_hub(f"deepglint-hf/{model_name}", private=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mlcd-vit-bigG-patch14-448",
type=str,
choices=MODEL_NAME_TO_CHECKPOINT_PATH.keys(),
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--input_dir",
default="mlcd/original",
help="Location of MLCD original weights",
)
parser.add_argument(
"--output_dir",
default="mlcd/checkpoint",
help="Location to write HF model and processor",
)
parser.add_argument(
"--verify_hidden_state",
action="store_true",
help="Whether to verify hidden_state against the original implementation.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
convert_mlcd_checkpoint(
args.model_name, args.input_dir, args.output_dir, args.verify_hidden_state, args.push_to_hub
)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mlcd/modeling_mlcd.py | src/transformers/models/mlcd/modeling_mlcd.py | # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/mlcd/modular_mlcd.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_mlcd.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional, Union
import torch
import torch.nn as nn
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, torch_int
from ...utils.generic import check_model_inputs
from .configuration_mlcd import MLCDVisionConfig
class MLCDMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class MLCDRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, dim: int, theta: float = 10000.0) -> None:
super().__init__()
self.dim = dim
self.theta = theta
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
def forward(self, num_patches_height: int, num_patches_width: int) -> torch.Tensor:
"""
Calculate the Rotary Position Embedding (RoPE) for MLCDVisionModel based on the grid size.
Args:
num_patches_height (int): Number of patches in the height dimension.
num_patches_width (int): Number of patches in the width dimension.
Returns:
torch.Tensor: Rotary positional embeddings for the given grid size.
"""
# Generate position IDs for height and width dimensions
hpos_ids = (
torch.arange(num_patches_height, device=self.inv_freq.device).unsqueeze(1).expand(-1, num_patches_width)
)
wpos_ids = (
torch.arange(num_patches_width, device=self.inv_freq.device).unsqueeze(0).expand(num_patches_height, -1)
)
# Flatten and stack the position IDs
pos_ids = torch.stack([hpos_ids.flatten(), wpos_ids.flatten()], dim=-1)
# Generate the full rotary positional embeddings for the maximum grid size
max_grid_size = max(num_patches_height, num_patches_width)
seq = torch.arange(max_grid_size, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
rotary_pos_emb_full = torch.outer(seq, self.inv_freq)
# Select and flatten the embeddings based on the position IDs
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
return rotary_pos_emb
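# A minimal sketch (illustrative sizes) of the 2-D rotary table produced above: every patch of a 2x3 grid
# receives `dim` rotary angles, half indexed by its row position and half by its column position.
def _mlcd_rope_shape_example():
    rope = MLCDRotaryEmbedding(dim=4)  # dim == head_dim // 2 in the real model
    angles = rope(num_patches_height=2, num_patches_width=3)
    return angles.shape  # torch.Size([6, 4]) == (num_patches, dim)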
class MLCDVisionEmbeddings(nn.Module):
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embedding.weight.dtype
# patch_embeds -> shape = [batch, width, grid, grid]
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
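# A tiny standalone check of the claim in the docstring above (made-up sizes): repeating the key/value
# heads this way matches torch.repeat_interleave along the head dimension.
def _repeat_kv_check():
    kv = torch.randn(2, 3, 5, 4)  # (batch, num_key_value_heads, seq_len, head_dim)
    assert torch.equal(repeat_kv(kv, 2), torch.repeat_interleave(kv, repeats=2, dim=1))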
def apply_rotary_pos_emb_vision(
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
orig_q_dtype = q.dtype
orig_k_dtype = k.dtype
q, k = q.float(), k.float()
cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
q_embed = q_embed.to(orig_q_dtype)
k_embed = k_embed.to(orig_k_dtype)
return q_embed, k_embed
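# A small numeric sketch of `rotate_half` and the rotary application above (made-up values): on a 4-dim
# head, rotate_half([a, b, c, d]) == [-c, -d, a, b], and cos/sin of an all-zero angle leave q and k unchanged.
def _rope_identity_example():
    q = torch.tensor([[[[1.0, 2.0, 3.0, 4.0]]]])  # (batch, seq_len, num_heads, head_dim)
    assert torch.equal(rotate_half(q), torch.tensor([[[[-3.0, -4.0, 1.0, 2.0]]]]))
    zero_angles = torch.zeros(1, 1, 4)  # (batch, seq_len, head_dim), all-zero rotation
    q_rot, k_rot = apply_rotary_pos_emb_vision(q, q, zero_angles.cos(), zero_angles.sin())
    assert torch.equal(q_rot, q) and torch.equal(k_rot, q)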
class MLCDAttention(nn.Module):
"""Multi-headed attention with RoPE. Refer to papers:
- Attention is all you need:
https://huggingface.co/papers/1706.03762
- RoFormer: Enhanced Transformer with Rotary Position Embedding:
https://huggingface.co/papers/2104.09864
"""
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.num_key_value_groups = config.num_key_value_groups
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length = hidden_states.shape[:-1]
# Each of shape: [batch_size, seq_length, num_heads, head_dim]
query_states = self.q_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
key_states = self.k_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
value_states = self.v_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
# Apply positional embeddings
cos = position_embeddings[0].unsqueeze(0).float()
sin = position_embeddings[1].unsqueeze(0).float()
query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
# Each of shape: [batch_size, num_heads, seq_length, head_dim]
query_states = query_states.permute(0, 2, 1, 3).contiguous()
key_states = key_states.permute(0, 2, 1, 3).contiguous()
value_states = value_states.permute(0, 2, 1, 3).contiguous()
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scale,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.permute(1, 0, 2, 3).contiguous() # [seq_length, batch_size, num_heads, head_dim]
attn_output = attn_output.view(seq_length, batch_size, -1) # [seq_length, batch_size, embedding_dim]
attn_output = self.out_proj(attn_output)
attn_output = attn_output.permute(1, 0, 2).contiguous() # [batch_size, seq_length, embedding_dim]
return attn_output, attn_weights
class MLCDEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = MLCDAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = MLCDMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`.
Represents the hidden states from the previous layer or the input embeddings.
            position_embeddings (`tuple[torch.Tensor, torch.Tensor]`):
                A tuple `(cos, sin)` of rotary position embedding tensors, each of shape `(seq_len, head_dim)`,
                applied to the query and key states in the attention mechanism.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
class MLCDEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`MLCDEncoderLayer`].
Args:
config: MLCDVisionConfig
"""
def __init__(self, config: MLCDVisionConfig):
"""Overwrite dummy `MLCDConfig` to `MLCDVisionConfig`."""
super().__init__()
self.config = config
self.layers = nn.ModuleList([MLCDEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds: torch.FloatTensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
            position_embeddings (`tuple[torch.Tensor, torch.Tensor]`):
                A tuple `(cos, sin)` of rotary position embedding tensors, each of shape `(seq_len, head_dim)`,
                applied to the query and key states in the attention mechanism.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
hidden_states = inputs_embeds
for encoder_layer in self.layers:
hidden_states = encoder_layer(
hidden_states,
position_embeddings,
attention_mask,
**kwargs,
)
return BaseModelOutput(
last_hidden_state=hidden_states,
)
@auto_docstring
class MLCDPreTrainedModel(PreTrainedModel):
config: MLCDVisionConfig
base_model_prefix = "mlcd"
supports_gradient_checkpointing = True
accepts_loss_kwargs = False
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": MLCDEncoderLayer,
"attentions": MLCDAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, MLCDVisionEmbeddings):
factor = self.config.initializer_factor
init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
elif isinstance(module, MLCDAttention):
factor = self.config.initializer_factor
in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
out_proj_std = (module.embed_dim**-0.5) * factor
init.normal_(module.q_proj.weight, std=in_proj_std)
init.normal_(module.k_proj.weight, std=in_proj_std)
init.normal_(module.v_proj.weight, std=in_proj_std)
init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, MLCDMLP):
factor = self.config.initializer_factor
in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
init.normal_(module.fc1.weight, std=fc_std)
init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, MLCDVisionTransformer):
factor = self.config.initializer_factor
pos_emb_std = (module.config.hidden_size // module.config.num_attention_heads // 2) ** -0.5 * factor
init.normal_(module.class_pos_emb, mean=0.0, std=pos_emb_std)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Linear) and module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, MLCDRotaryEmbedding):
inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
init.copy_(module.inv_freq, inv_freq)
class MLCDVisionTransformer(nn.Module):
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = MLCDVisionEmbeddings(config)
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = MLCDEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.vision_rotary_embedding = MLCDRotaryEmbedding(config.hidden_size // config.num_attention_heads // 2)
self.class_pos_emb = nn.Parameter(torch.randn(1, config.hidden_size // config.num_attention_heads // 2))
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPooling]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
num_patches_height = pixel_values.shape[-2] // self.config.patch_size
num_patches_width = pixel_values.shape[-1] // self.config.patch_size
rotary_pos_emb = self.vision_rotary_embedding(num_patches_height, num_patches_width)
rotary_pos_emb = rotary_pos_emb.to(self.class_pos_emb.device)
rotary_pos_emb = torch.cat([self.class_pos_emb, rotary_pos_emb], dim=0)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
hidden_states = self.embeddings(pixel_values)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
position_embeddings=position_embeddings,
**kwargs,
)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
)
@auto_docstring(
custom_intro="""
    The vision model from MLCD without any head or projection on top.
"""
)
class MLCDVisionModel(MLCDPreTrainedModel):
config: MLCDVisionConfig
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = ["MLCDEncoderLayer"]
def __init__(self, config: MLCDVisionConfig):
super().__init__(config)
self.vision_model = MLCDVisionTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
Example:
```python
        >>> import requests
        >>> import torch
>>> from PIL import Image
>>> from transformers import AutoProcessor, MLCDVisionModel
>>> model = MLCDVisionModel.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-448")
>>> processor = AutoProcessor.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-448")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs, output_attentions=True)
>>> features = outputs.last_hidden_state
>>> print(f"Extracted features shape: {features.shape}")
>>> print(f"Number of attention layers: {len(outputs.attentions)}")
>>> print(f"Attention shape: {outputs.attentions[0].shape}")
```"""
return self.vision_model(
pixel_values=pixel_values,
**kwargs,
)
__all__ = ["MLCDPreTrainedModel", "MLCDVisionModel"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mlcd/__init__.py | src/transformers/models/mlcd/__init__.py | # Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_mlcd import *
from .modeling_mlcd import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mlcd/modular_mlcd.py | src/transformers/models/mlcd/modular_mlcd.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Optional, Union
import torch
import torch.nn as nn
from ... import initialization as init
from ...configuration_utils import PreTrainedConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from ...utils.generic import check_model_inputs
from ..clip.modeling_clip import (
CLIPMLP,
CLIPAttention,
CLIPEncoder,
CLIPEncoderLayer,
CLIPVisionEmbeddings,
CLIPVisionModel,
CLIPVisionTransformer,
)
from ..llama.modeling_llama import eager_attention_forward
from ..qwen2_vl.modeling_qwen2_vl import VisionRotaryEmbedding, apply_rotary_pos_emb_vision
logger = logging.get_logger(__name__)
class MLCDVisionConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MLCDVisionModel`]. It is used to instantiate a MLCD
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the vision encoder of the MLCD
[DeepGlint-AI/mlcd-vit-bigG-patch14-336](https://huggingface.co/DeepGlint-AI/mlcd-vit-bigG-patch14-336) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1664):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
projection_dim (`int`, *optional*, defaults to 1024):
Dimensionality of text and vision projection layers.
num_hidden_layers (`int`, *optional*, defaults to 48):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 336):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import MLCDVisionConfig, MLCDVisionModel
>>> # Initializing a MLCDVisionConfig with DeepGlint-AI/mlcd-vit-bigG-patch14-336 style configuration
>>> configuration = MLCDVisionConfig()
>>> # Initializing a MLCDVisionModel (with random weights) from the DeepGlint-AI/mlcd-vit-bigG-patch14-336 style configuration
>>> model = MLCDVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mlcd_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=1664,
intermediate_size=8192,
num_hidden_layers=48,
num_attention_heads=16,
num_key_value_groups=1,
num_channels=3,
image_size=336,
patch_size=14,
hidden_act="gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_groups = num_key_value_groups
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
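# Worked example (illustrative, derived from the defaults above): the per-head dimension is
# hidden_size // num_attention_heads = 1664 // 16 = 104, so the rotary embedding table built by
# `MLCDVisionTransformer` below uses a rotary dimension of head_dim // 2 = 52 per patch position.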
class MLCDMLP(CLIPMLP):
pass
class MLCDRotaryEmbedding(VisionRotaryEmbedding):
def forward(self, num_patches_height: int, num_patches_width: int) -> torch.Tensor:
"""
Calculate the Rotary Position Embedding (RoPE) for MLCDVisionModel based on the grid size.
Args:
num_patches_height (int): Number of patches in the height dimension.
num_patches_width (int): Number of patches in the width dimension.
Returns:
torch.Tensor: Rotary positional embeddings for the given grid size.
"""
# Generate position IDs for height and width dimensions
hpos_ids = (
torch.arange(num_patches_height, device=self.inv_freq.device).unsqueeze(1).expand(-1, num_patches_width)
)
wpos_ids = (
torch.arange(num_patches_width, device=self.inv_freq.device).unsqueeze(0).expand(num_patches_height, -1)
)
# Flatten and stack the position IDs
pos_ids = torch.stack([hpos_ids.flatten(), wpos_ids.flatten()], dim=-1)
# Generate the full rotary positional embeddings for the maximum grid size
max_grid_size = max(num_patches_height, num_patches_width)
seq = torch.arange(max_grid_size, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
rotary_pos_emb_full = torch.outer(seq, self.inv_freq)
# Select and flatten the embeddings based on the position IDs
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
return rotary_pos_emb
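def _rotary_grid_sketch():
    # Minimal usage sketch (illustrative only, assuming the `dim` constructor argument inherited
    # from `VisionRotaryEmbedding`): with the default config the rotary dimension is 104 // 2 == 52,
    # so a 24x24 patch grid (a 336x336 image with patch_size 14) yields one 52-dim row per patch.
    rotary = MLCDRotaryEmbedding(dim=52)
    pos_emb = rotary(num_patches_height=24, num_patches_width=24)
    return pos_emb.shape  # torch.Size([576, 52])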
class MLCDVisionEmbeddings(CLIPVisionEmbeddings):
def __init__(self, config: MLCDVisionConfig):
super().__init__(config)
del self.position_embedding
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embedding.weight.dtype
# patch_embeds -> shape = [batch, width, grid, grid]
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
return embeddings
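# Worked example (illustrative): with image_size=336 and patch_size=14 the patch grid is 24x24,
# so `patch_embeds` covers 576 positions and, once the class token is prepended, `embeddings`
# has shape (batch_size, 577, hidden_size).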
class MLCDAttention(CLIPAttention):
"""Multi-headed attention with RoPE. Refer to papers:
- Attention is all you need:
https://huggingface.co/papers/1706.03762
- RoFormer: Enhanced Transformer with Rotary Position Embedding:
https://huggingface.co/papers/2104.09864
"""
def __init__(self, config: MLCDVisionConfig):
super().__init__(config)
self.num_key_value_groups = config.num_key_value_groups
self.is_causal = False
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
batch_size, seq_length = hidden_states.shape[:-1]
# Each of shape: [batch_size, seq_length, num_heads, head_dim]
query_states = self.q_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
key_states = self.k_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
value_states = self.v_proj(hidden_states).reshape((batch_size, seq_length, self.num_heads, self.head_dim))
# Apply positional embeddings
cos = position_embeddings[0].unsqueeze(0).float()
sin = position_embeddings[1].unsqueeze(0).float()
query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
# Each of shape: [batch_size, num_heads, seq_length, head_dim]
query_states = query_states.permute(0, 2, 1, 3).contiguous()
key_states = key_states.permute(0, 2, 1, 3).contiguous()
value_states = value_states.permute(0, 2, 1, 3).contiguous()
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scale,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.permute(1, 0, 2, 3).contiguous() # [seq_length, batch_size, num_heads, head_dim]
attn_output = attn_output.view(seq_length, batch_size, -1) # [seq_length, batch_size, embedding_dim]
attn_output = self.out_proj(attn_output)
attn_output = attn_output.permute(1, 0, 2).contiguous() # [batch_size, seq_length, embedding_dim]
return attn_output, attn_weights
class MLCDEncoderLayer(CLIPEncoderLayer):
def __init__(self, config: MLCDVisionConfig):
super().__init__(config)
self.self_attn = MLCDAttention(config)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`.
Represents the hidden states from the previous layer or the input embeddings.
position_embeddings (`tuple[torch.Tensor, torch.Tensor]`):
A tuple of two tensors, each of shape `(batch, seq_len, embed_dim)`.
Represents absolute positional embeddings for the query and key in the attention mechanism.
attention_mask (`torch.FloatTensor`):
Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
class MLCDEncoder(CLIPEncoder):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`MLCDEncoderLayer`].
Args:
config: MLCDVisionConfig
"""
def __init__(self, config: MLCDVisionConfig):
"""Overwrite dummy `MLCDConfig` to `MLCDVisionConfig`."""
super().__init__(config)
def forward(
self,
inputs_embeds: torch.FloatTensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
position_embeddings (`tuple[torch.Tensor, torch.Tensor]`):
A tuple of two tensors, each of shape `(batch, seq_len, embed_dim)`.
Represents absolute positional embeddings for the query and key in the attention mechanism.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
hidden_states = inputs_embeds
for encoder_layer in self.layers:
hidden_states = encoder_layer(
hidden_states,
position_embeddings,
attention_mask,
**kwargs,
)
return BaseModelOutput(
last_hidden_state=hidden_states,
)
@auto_docstring
class MLCDPreTrainedModel(PreTrainedModel):
config: MLCDVisionConfig
base_model_prefix = "mlcd"
supports_gradient_checkpointing = True
accepts_loss_kwargs = False
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": MLCDEncoderLayer,
"attentions": MLCDAttention,
}
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_factor
if isinstance(module, MLCDVisionEmbeddings):
factor = self.config.initializer_factor
init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
elif isinstance(module, MLCDAttention):
factor = self.config.initializer_factor
in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
out_proj_std = (module.embed_dim**-0.5) * factor
init.normal_(module.q_proj.weight, std=in_proj_std)
init.normal_(module.k_proj.weight, std=in_proj_std)
init.normal_(module.v_proj.weight, std=in_proj_std)
init.normal_(module.out_proj.weight, std=out_proj_std)
elif isinstance(module, MLCDMLP):
factor = self.config.initializer_factor
in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
init.normal_(module.fc1.weight, std=fc_std)
init.normal_(module.fc2.weight, std=in_proj_std)
elif isinstance(module, MLCDVisionTransformer):
factor = self.config.initializer_factor
pos_emb_std = (module.config.hidden_size // module.config.num_attention_heads // 2) ** -0.5 * factor
init.normal_(module.class_pos_emb, mean=0.0, std=pos_emb_std)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Linear) and module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, MLCDRotaryEmbedding):
inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
init.copy_(module.inv_freq, inv_freq)
class MLCDVisionTransformer(CLIPVisionTransformer):
def __init__(self, config: MLCDVisionConfig):
super().__init__(config)
self.vision_rotary_embedding = MLCDRotaryEmbedding(config.hidden_size // config.num_attention_heads // 2)
self.class_pos_emb = nn.Parameter(torch.randn(1, config.hidden_size // config.num_attention_heads // 2))
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPooling]:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
num_patches_height = pixel_values.shape[-2] // self.config.patch_size
num_patches_width = pixel_values.shape[-1] // self.config.patch_size
rotary_pos_emb = self.vision_rotary_embedding(num_patches_height, num_patches_width)
rotary_pos_emb = rotary_pos_emb.to(self.class_pos_emb.device)
rotary_pos_emb = torch.cat([self.class_pos_emb, rotary_pos_emb], dim=0)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
hidden_states = self.embeddings(pixel_values)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
position_embeddings=position_embeddings,
**kwargs,
)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
)
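# Shape walkthrough (illustrative, default config, 336x336 input): `vision_rotary_embedding`
# returns a (576, 52) table, prepending `class_pos_emb` gives (577, 52), and after duplication
# along the last dimension the cos/sin position embeddings handed to every encoder layer are each
# of shape (577, 104), i.e. one row per token (class token + 576 patches) and one column per
# attention head dimension.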
class MLCDVisionModel(CLIPVisionModel):
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
Example:
```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoProcessor, MLCDVisionModel
>>> model = MLCDVisionModel.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-448")
>>> processor = AutoProcessor.from_pretrained("DeepGlint-AI/mlcd-vit-bigG-patch14-448")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs, output_attentions=True)
>>> features = outputs.last_hidden_state
>>> print(f"Extracted features shape: {features.shape}")
>>> print(f"Number of attention layers: {len(outputs.attentions)}")
>>> print(f"Attention shape: {outputs.attentions[0].shape}")
```"""
return self.vision_model(
pixel_values=pixel_values,
**kwargs,
)
__all__ = [
"MLCDVisionConfig",
"MLCDPreTrainedModel",
"MLCDVisionModel",
]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mlcd/configuration_mlcd.py | src/transformers/models/mlcd/configuration_mlcd.py | # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/mlcd/modular_mlcd.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_mlcd.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PreTrainedConfig
class MLCDVisionConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of an [`MLCDVisionModel`]. It is used to instantiate an MLCD
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the vision encoder of the MLCD
[DeepGlint-AI/mlcd-vit-bigG-patch14-336](https://huggingface.co/DeepGlint-AI/mlcd-vit-bigG-patch14-336) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1664):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_key_value_groups (`int`, *optional*, defaults to 1):
The number of groups for the key and value heads in the attention layers; the default of 1 corresponds to standard multi-head attention.
num_hidden_layers (`int`, *optional*, defaults to 48):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 336):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import MLCDVisionConfig, MLCDVisionModel
>>> # Initializing a MLCDVisionConfig with DeepGlint-AI/mlcd-vit-bigG-patch14-336 style configuration
>>> configuration = MLCDVisionConfig()
>>> # Initializing a MLCDVisionModel (with random weights) from the DeepGlint-AI/mlcd-vit-bigG-patch14-336 style configuration
>>> model = MLCDVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mlcd_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=1664,
intermediate_size=8192,
num_hidden_layers=48,
num_attention_heads=16,
num_key_value_groups=1,
num_channels=3,
image_size=336,
patch_size=14,
hidden_act="gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_groups = num_key_value_groups
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
__all__ = ["MLCDVisionConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bros/configuration_bros.py | src/transformers/models/bros/configuration_bros.py | # coding=utf-8
# Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bros model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class BrosConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BrosModel`]. It is used to
instantiate a Bros model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Bros
[jinho8345/bros-base-uncased](https://huggingface.co/jinho8345/bros-base-uncased) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Bros model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BrosModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`BrosModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
The index of the padding token in the token vocabulary.
dim_bbox (`int`, *optional*, defaults to 8):
The dimension of the bounding box coordinates, stored as the four corner points (x0, y0, x1, y0, x1, y1, x0, y1).
bbox_scale (`float`, *optional*, defaults to 100.0):
The scale factor of the bounding box coordinates.
n_relations (`int`, *optional*, defaults to 1):
The number of relations for SpadeEE(entity extraction), SpadeEL(entity linking) head.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the classifier head.
Examples:
```python
>>> from transformers import BrosConfig, BrosModel
>>> # Initializing a BROS jinho8345/bros-base-uncased style configuration
>>> configuration = BrosConfig()
>>> # Initializing a model from the jinho8345/bros-base-uncased style configuration
>>> model = BrosModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "bros"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
dim_bbox=8,
bbox_scale=100.0,
n_relations=1,
classifier_dropout_prob=0.1,
**kwargs,
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
**kwargs,
)
self.dim_bbox = dim_bbox
self.bbox_scale = bbox_scale
self.n_relations = n_relations
self.dim_bbox_sinusoid_emb_2d = self.hidden_size // 4
self.dim_bbox_sinusoid_emb_1d = self.dim_bbox_sinusoid_emb_2d // self.dim_bbox
self.dim_bbox_projection = self.hidden_size // self.num_attention_heads
self.classifier_dropout_prob = classifier_dropout_prob
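# Worked example (illustrative, using the defaults above): dim_bbox_sinusoid_emb_2d = 768 // 4 = 192,
# dim_bbox_sinusoid_emb_1d = 192 // 8 = 24, and dim_bbox_projection = 768 // 12 = 64, which matches
# the per-head size used by the attention layers in `BrosModel`.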
__all__ = ["BrosConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bros/__init__.py | src/transformers/models/bros/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_bros import *
from .modeling_bros import *
from .processing_bros import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bros/convert_bros_to_pytorch.py | src/transformers/models/bros/convert_bros_to_pytorch.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Bros checkpoints."""
import argparse
import bros # original repo
import torch
from transformers import BrosConfig, BrosModel, BrosProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_configs(model_name):
bros_config = BrosConfig.from_pretrained(model_name)
return bros_config
def remove_ignore_keys_(state_dict):
ignore_keys = [
"embeddings.bbox_sinusoid_emb.inv_freq",
]
for k in ignore_keys:
state_dict.pop(k, None)
def rename_key(name):
if name == "embeddings.bbox_projection.weight":
name = "bbox_embeddings.bbox_projection.weight"
if name == "embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq":
name = "bbox_embeddings.bbox_sinusoid_emb.x_pos_emb.inv_freq"
if name == "embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq":
name = "bbox_embeddings.bbox_sinusoid_emb.y_pos_emb.inv_freq"
return name
def convert_state_dict(orig_state_dict, model):
# rename keys
for key in orig_state_dict.copy():
val = orig_state_dict.pop(key)
orig_state_dict[rename_key(key)] = val
# remove ignore keys
remove_ignore_keys_(orig_state_dict)
return orig_state_dict
def convert_bros_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
# load original model
original_model = bros.BrosModel.from_pretrained(model_name).eval()
# load HuggingFace Model
bros_config = get_configs(model_name)
model = BrosModel.from_pretrained(model_name, config=bros_config)
model.eval()
state_dict = original_model.state_dict()
new_state_dict = convert_state_dict(state_dict, model)
model.load_state_dict(new_state_dict)
# verify results
# the original BROS model requires 4 points (8 float values) per bbox, so prepare bbox with shape [batch_size, seq_len, 8]
bbox = torch.tensor(
[
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.4396, 0.6720, 0.4659, 0.6720, 0.4659, 0.6850, 0.4396, 0.6850],
[0.4698, 0.6720, 0.4843, 0.6720, 0.4843, 0.6850, 0.4698, 0.6850],
[0.4698, 0.6720, 0.4843, 0.6720, 0.4843, 0.6850, 0.4698, 0.6850],
[0.2047, 0.6870, 0.2730, 0.6870, 0.2730, 0.7000, 0.2047, 0.7000],
[0.2047, 0.6870, 0.2730, 0.6870, 0.2730, 0.7000, 0.2047, 0.7000],
[1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000],
]
]
)
processor = BrosProcessor.from_pretrained(model_name)
encoding = processor("His name is Rocco.", return_tensors="pt")
encoding["bbox"] = bbox
original_hidden_states = original_model(**encoding).last_hidden_state
# pixel_values = processor(image, return_tensors="pt").pixel_values
last_hidden_states = model(**encoding).last_hidden_state
assert torch.allclose(original_hidden_states, last_hidden_states, atol=1e-4)
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub("jinho8345/" + model_name.split("/")[-1], commit_message="Update model")
processor.push_to_hub("jinho8345/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jinho8345/bros-base-uncased",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the Hugging Face hub.",
)
args = parser.parse_args()
convert_bros_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bros/modeling_bros.py | src/transformers/models/bros/modeling_bros.py | # coding=utf-8
# Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Bros model."""
import math
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutputWithCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
from .configuration_bros import BrosConfig
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of token classification models.
"""
)
class BrosSpadeOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
initial_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
Classification scores for entity initial tokens (before SoftMax).
subsequent_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length+1)`):
Classification scores for entity sequence tokens (before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
initial_token_logits: Optional[torch.FloatTensor] = None
subsequent_token_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
class BrosPositionalEmbedding1D(nn.Module):
# Reference: https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py#L15
def __init__(self, config):
super().__init__()
self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d
inv_freq = 1 / (
10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d)
)
self.register_buffer("inv_freq", inv_freq)
def forward(self, pos_seq: torch.Tensor) -> torch.Tensor:
seq_size = pos_seq.size()
b1, b2, b3 = seq_size
sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
return pos_emb
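def _bros_1d_pos_emb_sketch():
    # Minimal shape sketch (illustrative helper, not part of the reference implementation): with a
    # default `BrosConfig`, dim_bbox_sinusoid_emb_1d == 24, so a relative-coordinate tensor of shape
    # (seq_len, seq_len, batch) maps to sinusoid features of shape (seq_len, seq_len, batch, 24).
    config = BrosConfig()
    pos_emb = BrosPositionalEmbedding1D(config)
    rel_coords = torch.zeros(5, 5, 2)
    return pos_emb(rel_coords).shape  # torch.Size([5, 5, 2, 24])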
class BrosPositionalEmbedding2D(nn.Module):
def __init__(self, config):
super().__init__()
self.dim_bbox = config.dim_bbox
self.x_pos_emb = BrosPositionalEmbedding1D(config)
self.y_pos_emb = BrosPositionalEmbedding1D(config)
def forward(self, bbox: torch.Tensor) -> torch.Tensor:
stack = []
for i in range(self.dim_bbox):
if i % 2 == 0:
stack.append(self.x_pos_emb(bbox[..., i]))
else:
stack.append(self.y_pos_emb(bbox[..., i]))
bbox_pos_emb = torch.cat(stack, dim=-1)
return bbox_pos_emb
class BrosBboxEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config)
self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False)
def forward(self, bbox: torch.Tensor):
bbox_t = bbox.transpose(0, 1)
bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :]
bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos)
bbox_pos_emb = self.bbox_projection(bbox_pos_emb)
return bbox_pos_emb
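# Shape walkthrough (illustrative, default config): a `bbox` tensor of shape (batch, seq_len, 8) is
# transposed and broadcast-subtracted into pairwise relative coordinates of shape
# (seq_len, seq_len, batch, 8); the sinusoidal embedding expands these to (seq_len, seq_len, batch, 192)
# and the projection maps them to (seq_len, seq_len, batch, 64), the per-head size consumed by
# `BrosSelfAttention`.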
class BrosTextEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.register_buffer(
"token_type_ids",
torch.zeros(
self.position_ids.size(),
dtype=torch.long,
device=self.position_ids.device,
),
persistent=False,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BrosSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
def forward(
self,
hidden_states: torch.Tensor,
bbox_pos_emb: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[torch.Tensor] = False,
) -> tuple[torch.Tensor]:
hidden_shape = (hidden_states.shape[0], -1, self.num_attention_heads, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.key(encoder_hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(encoder_hidden_states).view(hidden_shape).transpose(1, 2)
attention_mask = encoder_attention_mask
else:
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
# bbox positional encoding
batch_size, n_head, seq_length, d_head = query_layer.shape
bbox_pos_emb = bbox_pos_emb.view(seq_length, seq_length, batch_size, d_head)
bbox_pos_emb = bbox_pos_emb.permute([2, 0, 1, 3])
bbox_pos_scores = torch.einsum("bnid,bijd->bnij", (query_layer, bbox_pos_emb))
attention_scores = attention_scores + bbox_pos_scores
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BrosModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (None,)
return outputs
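def _bros_bbox_score_sketch():
    # Illustrative only (random tensors, hypothetical sizes): the einsum above contracts the head
    # dimension between each query and the projected relative-bbox embedding, producing one extra
    # score per (query, key) pair that is added to the content attention scores.
    batch, heads, seq, d_head = 2, 12, 5, 64
    query = torch.rand(batch, heads, seq, d_head)
    bbox_pos_emb = torch.rand(batch, seq, seq, d_head)
    return torch.einsum("bnid,bijd->bnij", (query, bbox_pos_emb)).shape  # torch.Size([2, 12, 5, 5])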
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Bros
class BrosSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BrosAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BrosSelfAttention(config)
self.output = BrosSelfOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
bbox_pos_emb: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states=hidden_states,
bbox_pos_emb=bbox_pos_emb,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Bros
class BrosIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BrosOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BrosLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BrosAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise Exception(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BrosAttention(config)
self.intermediate = BrosIntermediate(config)
self.output = BrosOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
bbox_pos_emb: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(
hidden_states,
bbox_pos_emb=bbox_pos_emb,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise Exception(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (None,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BrosEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BrosLayer(config) for _ in range(config.num_hidden_layers)])
@can_return_tuple
def forward(
self,
hidden_states: torch.Tensor,
bbox_pos_emb: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states=hidden_states,
bbox_pos_emb=bbox_pos_emb,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutputWithCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Bros
class BrosPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BrosRelationExtractor(nn.Module):
def __init__(self, config):
super().__init__()
self.n_relations = config.n_relations
self.backbone_hidden_size = config.hidden_size
self.head_hidden_size = config.hidden_size
self.classifier_dropout_prob = config.classifier_dropout_prob
self.drop = nn.Dropout(self.classifier_dropout_prob)
self.query = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
self.key = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
self.dummy_node = nn.Parameter(torch.zeros(1, self.backbone_hidden_size))
def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
query_layer = self.query(self.drop(query_layer))
dummy_vec = self.dummy_node.unsqueeze(0).repeat(1, key_layer.size(1), 1)
key_layer = torch.cat([key_layer, dummy_vec], axis=0)
key_layer = self.key(self.drop(key_layer))
query_layer = query_layer.view(
query_layer.size(0), query_layer.size(1), self.n_relations, self.head_hidden_size
)
key_layer = key_layer.view(key_layer.size(0), key_layer.size(1), self.n_relations, self.head_hidden_size)
relation_score = torch.matmul(
query_layer.permute(2, 1, 0, 3), key_layer.permute(2, 1, 3, 0)
) # equivalent to torch.einsum("ibnd,jbnd->nbij", (query_layer, key_layer))
return relation_score
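# Shape walkthrough (illustrative, inferred from the code above): `query_layer` and `key_layer`
# enter as (seq_len, batch, hidden); the dummy node is appended so keys become
# (seq_len + 1, batch, hidden), and `relation_score` comes out as
# (n_relations, batch, seq_len, seq_len + 1), the extra key column corresponding to the dummy node.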
@auto_docstring
class BrosPreTrainedModel(PreTrainedModel):
config: BrosConfig
base_model_prefix = "bros"
@torch.no_grad()
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
super()._init_weights(module)
std = self.config.initializer_range
if isinstance(module, BrosRelationExtractor):
init.normal_(module.dummy_node, std=std)
elif isinstance(module, BrosTextEmbeddings):
init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
init.zeros_(module.token_type_ids)
elif isinstance(module, BrosPositionalEmbedding1D):
inv_freq = 1 / (
10000 ** (torch.arange(0.0, module.dim_bbox_sinusoid_emb_1d, 2.0) / module.dim_bbox_sinusoid_emb_1d)
)
init.copy_(module.inv_freq, inv_freq)
@auto_docstring
class BrosModel(BrosPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = BrosTextEmbeddings(config)
self.bbox_embeddings = BrosBboxEmbeddings(config)
self.encoder = BrosEncoder(config)
self.pooler = BrosPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
bbox: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosModel
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if bbox is None:
raise ValueError("You have to specify bbox")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
# if bbox has 2 points (4 float tensors) per token, convert it to 4 points (8 float tensors) per token
if bbox.shape[-1] == 4:
bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
scaled_bbox = bbox * self.config.bbox_scale
bbox_position_embeddings = self.bbox_embeddings(scaled_bbox)
encoder_outputs = self.encoder(
embedding_output,
bbox_pos_emb=bbox_position_embeddings,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@auto_docstring
class BrosForTokenClassification(BrosPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bros = BrosModel(config)
classifier_dropout = (
config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
bbox: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
bbox_first_token_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
r"""
bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosForTokenClassification
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bros(
input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if bbox_first_token_mask is not None:
bbox_first_token_mask = bbox_first_token_mask.view(-1)
loss = loss_fct(
logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask]
)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
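# Illustrative sketch (not part of the upstream file): how `bbox_first_token_mask`
# restricts the token-classification loss above to the first token of each box.
# Shapes and values are hypothetical toy data; a boolean mask is assumed.
def _bros_first_token_loss_sketch():
    import torch
    from torch.nn import CrossEntropyLoss
    num_labels = 3
    logits = torch.randn(1, 4, num_labels)  # (batch, seq_len, num_labels)
    labels = torch.tensor([[0, 2, 1, 0]])  # (batch, seq_len)
    mask = torch.tensor([[True, False, True, False]])  # first-token positions
    flat_mask = mask.view(-1)
    loss = CrossEntropyLoss()(
        logits.view(-1, num_labels)[flat_mask], labels.view(-1)[flat_mask]
    )
    return loss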
@auto_docstring(
custom_intro="""
Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bros/processing_bros.py | src/transformers/models/bros/processing_bros.py | # coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Bros.
"""
from ...processing_utils import ProcessingKwargs, ProcessorMixin
class BrosProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_length": False,
"verbose": True,
},
}
class BrosProcessor(ProcessorMixin):
r"""
Constructs a Bros processor which wraps a BERT tokenizer.
[`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
[`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
Args:
tokenizer (`BertTokenizerFast`, *optional*):
An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
"""
valid_processor_kwargs = BrosProcessorKwargs
def __init__(self, tokenizer=None, **kwargs):
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(tokenizer)
__all__ = ["BrosProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mobilevit/modeling_mobilevit.py | src/transformers/models/mobilevit/modeling_mobilevit.py | # coding=utf-8
# Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE
"""PyTorch MobileViT model."""
import math
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
SemanticSegmenterOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging, torch_int
from .configuration_mobilevit import MobileViTConfig
logger = logging.get_logger(__name__)
def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int:
"""
Ensure that all layers have a channel count that is divisible by `divisor`.
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
# Make sure that rounding down does not reduce the value by more than 10%.
if new_value < 0.9 * value:
new_value += divisor
return int(new_value)
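# Illustrative sketch (not part of the upstream file): make_divisible rounds a
# channel count to the nearest multiple of `divisor` (default 8), never returns
# less than `min_value`, and bumps the result up if rounding would shrink the
# input by more than 10%.
def _make_divisible_sketch():
    assert make_divisible(32) == 32  # already a multiple of 8
    assert make_divisible(37) == 40  # rounded to the nearest multiple of 8
    assert make_divisible(19) == 24  # 16 would shrink 19 by >10%, so bump up
    assert make_divisible(3) == 8  # never drops below min_value (= divisor)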
class MobileViTConvLayer(nn.Module):
def __init__(
self,
config: MobileViTConfig,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
groups: int = 1,
bias: bool = False,
dilation: int = 1,
use_normalization: bool = True,
use_activation: Union[bool, str] = True,
) -> None:
super().__init__()
padding = int((kernel_size - 1) / 2) * dilation
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
self.convolution = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode="zeros",
)
if use_normalization:
self.normalization = nn.BatchNorm2d(
num_features=out_channels,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
)
else:
self.normalization = None
if use_activation:
if isinstance(use_activation, str):
self.activation = ACT2FN[use_activation]
elif isinstance(config.hidden_act, str):
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = config.hidden_act
else:
self.activation = None
def forward(self, features: torch.Tensor) -> torch.Tensor:
features = self.convolution(features)
if self.normalization is not None:
features = self.normalization(features)
if self.activation is not None:
features = self.activation(features)
return features
class MobileViTInvertedResidual(nn.Module):
"""
Inverted residual block (MobileNetv2): https://huggingface.co/papers/1801.04381
"""
def __init__(
self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1
) -> None:
super().__init__()
expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
if stride not in [1, 2]:
raise ValueError(f"Invalid stride {stride}.")
self.use_residual = (stride == 1) and (in_channels == out_channels)
self.expand_1x1 = MobileViTConvLayer(
config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
)
self.conv_3x3 = MobileViTConvLayer(
config,
in_channels=expanded_channels,
out_channels=expanded_channels,
kernel_size=3,
stride=stride,
groups=expanded_channels,
dilation=dilation,
)
self.reduce_1x1 = MobileViTConvLayer(
config,
in_channels=expanded_channels,
out_channels=out_channels,
kernel_size=1,
use_activation=False,
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
residual = features
features = self.expand_1x1(features)
features = self.conv_3x3(features)
features = self.reduce_1x1(features)
return residual + features if self.use_residual else features
class MobileViTMobileNetLayer(nn.Module):
def __init__(
self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1
) -> None:
super().__init__()
self.layer = nn.ModuleList()
for i in range(num_stages):
layer = MobileViTInvertedResidual(
config,
in_channels=in_channels,
out_channels=out_channels,
stride=stride if i == 0 else 1,
)
self.layer.append(layer)
in_channels = out_channels
def forward(self, features: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
features = layer_module(features)
return features
class MobileViTSelfAttention(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
super().__init__()
if hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size {hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class MobileViTSelfOutput(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class MobileViTAttention(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int) -> None:
super().__init__()
self.attention = MobileViTSelfAttention(config, hidden_size)
self.output = MobileViTSelfOutput(config, hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
self_outputs = self.attention(hidden_states)
attention_output = self.output(self_outputs)
return attention_output
class MobileViTIntermediate(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.dense = nn.Linear(hidden_size, intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class MobileViTOutput(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.dense = nn.Linear(intermediate_size, hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
class MobileViTTransformerLayer(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.attention = MobileViTAttention(config, hidden_size)
self.intermediate = MobileViTIntermediate(config, hidden_size, intermediate_size)
self.output = MobileViTOutput(config, hidden_size, intermediate_size)
self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
attention_output = self.attention(self.layernorm_before(hidden_states))
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output, hidden_states)
return layer_output
class MobileViTTransformer(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int) -> None:
super().__init__()
self.layer = nn.ModuleList()
for _ in range(num_stages):
transformer_layer = MobileViTTransformerLayer(
config,
hidden_size=hidden_size,
intermediate_size=int(hidden_size * config.mlp_ratio),
)
self.layer.append(transformer_layer)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
hidden_states = layer_module(hidden_states)
return hidden_states
class MobileViTLayer(GradientCheckpointingLayer):
"""
MobileViT block: https://huggingface.co/papers/2110.02178
"""
def __init__(
self,
config: MobileViTConfig,
in_channels: int,
out_channels: int,
stride: int,
hidden_size: int,
num_stages: int,
dilation: int = 1,
) -> None:
super().__init__()
self.patch_width = config.patch_size
self.patch_height = config.patch_size
if stride == 2:
self.downsampling_layer = MobileViTInvertedResidual(
config,
in_channels=in_channels,
out_channels=out_channels,
stride=stride if dilation == 1 else 1,
dilation=dilation // 2 if dilation > 1 else 1,
)
in_channels = out_channels
else:
self.downsampling_layer = None
self.conv_kxk = MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=in_channels,
kernel_size=config.conv_kernel_size,
)
self.conv_1x1 = MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=hidden_size,
kernel_size=1,
use_normalization=False,
use_activation=False,
)
self.transformer = MobileViTTransformer(
config,
hidden_size=hidden_size,
num_stages=num_stages,
)
self.layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.conv_projection = MobileViTConvLayer(
config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1
)
self.fusion = MobileViTConvLayer(
config, in_channels=2 * in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size
)
def unfolding(self, features: torch.Tensor) -> tuple[torch.Tensor, dict]:
patch_width, patch_height = self.patch_width, self.patch_height
patch_area = int(patch_width * patch_height)
batch_size, channels, orig_height, orig_width = features.shape
new_height = (
torch_int(torch.ceil(orig_height / patch_height) * patch_height)
if torch.jit.is_tracing()
else int(math.ceil(orig_height / patch_height) * patch_height)
)
new_width = (
torch_int(torch.ceil(orig_width / patch_width) * patch_width)
if torch.jit.is_tracing()
else int(math.ceil(orig_width / patch_width) * patch_width)
)
interpolate = False
if new_width != orig_width or new_height != orig_height:
# Note: Padding could be used instead, but it would then need to be handled in the attention function.
features = nn.functional.interpolate(
features, size=(new_height, new_width), mode="bilinear", align_corners=False
)
interpolate = True
# number of patches along width and height
num_patch_width = new_width // patch_width
num_patch_height = new_height // patch_height
num_patches = num_patch_height * num_patch_width
# convert from shape (batch_size, channels, orig_height, orig_width)
# to the shape (batch_size * patch_area, num_patches, channels)
patches = features.reshape(
batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width
)
patches = patches.transpose(1, 2)
patches = patches.reshape(batch_size, channels, num_patches, patch_area)
patches = patches.transpose(1, 3)
patches = patches.reshape(batch_size * patch_area, num_patches, -1)
info_dict = {
"orig_size": (orig_height, orig_width),
"batch_size": batch_size,
"channels": channels,
"interpolate": interpolate,
"num_patches": num_patches,
"num_patches_width": num_patch_width,
"num_patches_height": num_patch_height,
}
return patches, info_dict
def folding(self, patches: torch.Tensor, info_dict: dict) -> torch.Tensor:
patch_width, patch_height = self.patch_width, self.patch_height
patch_area = int(patch_width * patch_height)
batch_size = info_dict["batch_size"]
channels = info_dict["channels"]
num_patches = info_dict["num_patches"]
num_patch_height = info_dict["num_patches_height"]
num_patch_width = info_dict["num_patches_width"]
# convert from shape (batch_size * patch_area, num_patches, channels)
# back to shape (batch_size, channels, orig_height, orig_width)
features = patches.contiguous().view(batch_size, patch_area, num_patches, -1)
features = features.transpose(1, 3)
features = features.reshape(
batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width
)
features = features.transpose(1, 2)
features = features.reshape(
batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width
)
if info_dict["interpolate"]:
features = nn.functional.interpolate(
features, size=info_dict["orig_size"], mode="bilinear", align_corners=False
)
return features
def forward(self, features: torch.Tensor) -> torch.Tensor:
# reduce spatial dimensions if needed
if self.downsampling_layer:
features = self.downsampling_layer(features)
residual = features
# local representation
features = self.conv_kxk(features)
features = self.conv_1x1(features)
# convert feature map to patches
patches, info_dict = self.unfolding(features)
# learn global representations
patches = self.transformer(patches)
patches = self.layernorm(patches)
# convert patches back to feature maps
features = self.folding(patches, info_dict)
features = self.conv_projection(features)
features = self.fusion(torch.cat((residual, features), dim=1))
return features
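# Illustrative sketch (not part of the upstream file): the shape round-trip that
# `unfolding`/`folding` above perform, reproduced on a tiny tensor with 2x2
# patches. This mirrors only the reshapes, not the interpolation handling.
def _mobilevit_patch_roundtrip_sketch():
    import torch
    batch_size, channels, height, width = 1, 3, 4, 4
    patch_h = patch_w = 2
    patch_area = patch_h * patch_w
    num_patch_h, num_patch_w = height // patch_h, width // patch_w
    num_patches = num_patch_h * num_patch_w
    features = torch.arange(batch_size * channels * height * width, dtype=torch.float32)
    features = features.reshape(batch_size, channels, height, width)
    # unfold: (B, C, H, W) -> (B * patch_area, num_patches, C)
    patches = features.reshape(batch_size * channels * num_patch_h, patch_h, num_patch_w, patch_w)
    patches = patches.transpose(1, 2)
    patches = patches.reshape(batch_size, channels, num_patches, patch_area)
    patches = patches.transpose(1, 3)
    patches = patches.reshape(batch_size * patch_area, num_patches, -1)
    # fold: invert the reshapes to recover (B, C, H, W)
    folded = patches.contiguous().view(batch_size, patch_area, num_patches, -1)
    folded = folded.transpose(1, 3)
    folded = folded.reshape(batch_size * channels * num_patch_h, num_patch_w, patch_h, patch_w)
    folded = folded.transpose(1, 2)
    folded = folded.reshape(batch_size, channels, num_patch_h * patch_h, num_patch_w * patch_w)
    assert torch.equal(features, folded)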
class MobileViTEncoder(nn.Module):
def __init__(self, config: MobileViTConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList()
self.gradient_checkpointing = False
# segmentation architectures like DeepLab and PSPNet modify the strides
# of the classification backbones
dilate_layer_4 = dilate_layer_5 = False
if config.output_stride == 8:
dilate_layer_4 = True
dilate_layer_5 = True
elif config.output_stride == 16:
dilate_layer_5 = True
dilation = 1
layer_1 = MobileViTMobileNetLayer(
config,
in_channels=config.neck_hidden_sizes[0],
out_channels=config.neck_hidden_sizes[1],
stride=1,
num_stages=1,
)
self.layer.append(layer_1)
layer_2 = MobileViTMobileNetLayer(
config,
in_channels=config.neck_hidden_sizes[1],
out_channels=config.neck_hidden_sizes[2],
stride=2,
num_stages=3,
)
self.layer.append(layer_2)
layer_3 = MobileViTLayer(
config,
in_channels=config.neck_hidden_sizes[2],
out_channels=config.neck_hidden_sizes[3],
stride=2,
hidden_size=config.hidden_sizes[0],
num_stages=2,
)
self.layer.append(layer_3)
if dilate_layer_4:
dilation *= 2
layer_4 = MobileViTLayer(
config,
in_channels=config.neck_hidden_sizes[3],
out_channels=config.neck_hidden_sizes[4],
stride=2,
hidden_size=config.hidden_sizes[1],
num_stages=4,
dilation=dilation,
)
self.layer.append(layer_4)
if dilate_layer_5:
dilation *= 2
layer_5 = MobileViTLayer(
config,
in_channels=config.neck_hidden_sizes[4],
out_channels=config.neck_hidden_sizes[5],
stride=2,
hidden_size=config.hidden_sizes[2],
num_stages=3,
dilation=dilation,
)
self.layer.append(layer_5)
def forward(
self,
hidden_states: torch.Tensor,
output_hidden_states: bool = False,
return_dict: bool = True,
) -> Union[tuple, BaseModelOutputWithNoAttention]:
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
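# Illustrative sketch (not part of the upstream file): how `config.output_stride`
# maps to the dilation of the last two stages built above. The helper below is
# hypothetical and only restates the branching in MobileViTEncoder.__init__.
def _output_stride_to_dilations_sketch(output_stride: int) -> tuple[int, int]:
    dilation = 1
    if output_stride == 8:
        dilation *= 2
    dilation_layer_4 = dilation
    if output_stride in (8, 16):
        dilation *= 2
    dilation_layer_5 = dilation
    return dilation_layer_4, dilation_layer_5
# e.g. output_stride 32 -> (1, 1), 16 -> (1, 2), 8 -> (2, 4)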
@auto_docstring
class MobileViTPreTrainedModel(PreTrainedModel):
config: MobileViTConfig
base_model_prefix = "mobilevit"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
_no_split_modules = ["MobileViTLayer"]
@torch.no_grad()
def _init_weights(self, module: nn.Module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
if getattr(module, "running_mean", None) is not None:
init.zeros_(module.running_mean)
init.ones_(module.running_var)
init.zeros_(module.num_batches_tracked)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
@auto_docstring
class MobileViTModel(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig, expand_output: bool = True):
r"""
expand_output (`bool`, *optional*, defaults to `True`):
Whether to expand the output of the model using a 1x1 convolution. If `True`, the model will apply an additional
1x1 convolution to expand the output channels from `config.neck_hidden_sizes[5]` to `config.neck_hidden_sizes[6]`.
"""
super().__init__(config)
self.config = config
self.expand_output = expand_output
self.conv_stem = MobileViTConvLayer(
config,
in_channels=config.num_channels,
out_channels=config.neck_hidden_sizes[0],
kernel_size=3,
stride=2,
)
self.encoder = MobileViTEncoder(config)
if self.expand_output:
self.conv_1x1_exp = MobileViTConvLayer(
config,
in_channels=config.neck_hidden_sizes[5],
out_channels=config.neck_hidden_sizes[6],
kernel_size=1,
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.conv_stem(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.expand_output:
last_hidden_state = self.conv_1x1_exp(encoder_outputs[0])
# global average pooling: (batch_size, channels, height, width) -> (batch_size, channels)
pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False)
else:
last_hidden_state = encoder_outputs[0]
pooled_output = None
if not return_dict:
output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,)
return output + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
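# Illustrative sketch (not part of the upstream file): a randomly initialized
# MobileViTModel run on dummy pixel values to show the pooled output shape.
# Weights are untrained, so the values themselves are meaningless.
def _mobilevit_model_usage_sketch():
    import torch
    config = MobileViTConfig()
    model = MobileViTModel(config).eval()
    pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
    with torch.no_grad():
        outputs = model(pixel_values)
    # pooled features have neck_hidden_sizes[6] channels after the 1x1 expansion
    assert outputs.pooler_output.shape == (1, config.neck_hidden_sizes[6])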
@auto_docstring(
custom_intro="""
MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
class MobileViTForImageClassification(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevit = MobileViTModel(config)
# Classifier head
self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
self.classifier = (
nn.Linear(config.neck_hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilevit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(self.dropout(pooled_output))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
)
class MobileViTASPPPooling(nn.Module):
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int) -> None:
super().__init__()
self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.conv_1x1 = MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
use_normalization=True,
use_activation="relu",
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
spatial_size = features.shape[-2:]
features = self.global_pool(features)
features = self.conv_1x1(features)
features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False)
return features
class MobileViTASPP(nn.Module):
"""
ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTConfig) -> None:
super().__init__()
in_channels = config.neck_hidden_sizes[-2]
out_channels = config.aspp_out_channels
if len(config.atrous_rates) != 3:
raise ValueError("Expected 3 values for atrous_rates")
self.convs = nn.ModuleList()
in_projection = MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
use_activation="relu",
)
self.convs.append(in_projection)
self.convs.extend(
[
MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
dilation=rate,
use_activation="relu",
)
for rate in config.atrous_rates
]
)
pool_layer = MobileViTASPPPooling(config, in_channels, out_channels)
self.convs.append(pool_layer)
self.project = MobileViTConvLayer(
config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
)
self.dropout = nn.Dropout(p=config.aspp_dropout_prob)
def forward(self, features: torch.Tensor) -> torch.Tensor:
pyramid = []
for conv in self.convs:
pyramid.append(conv(features))
pyramid = torch.cat(pyramid, dim=1)
pooled_features = self.project(pyramid)
pooled_features = self.dropout(pooled_features)
return pooled_features
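# Illustrative sketch (not part of the upstream file): the ASPP module above
# concatenates five branches (one 1x1 in-projection, three dilated 3x3 convs,
# one global-pooling branch), which is why `self.project` expects
# 5 * out_channels input channels.
def _aspp_branch_count_sketch(num_atrous_rates: int = 3) -> int:
    return 1 + num_atrous_rates + 1  # in_projection + dilated convs + pooling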
class MobileViTDeepLabV3(nn.Module):
"""
DeepLabv3 architecture: https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTConfig) -> None:
super().__init__()
self.aspp = MobileViTASPP(config)
self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
self.classifier = MobileViTConvLayer(
config,
in_channels=config.aspp_out_channels,
out_channels=config.num_labels,
kernel_size=1,
use_normalization=False,
use_activation=False,
bias=True,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
features = self.aspp(hidden_states[-1])
features = self.dropout(features)
features = self.classifier(features)
return features
@auto_docstring(
custom_intro="""
MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC.
"""
)
class MobileViTForSemanticSegmentation(MobileViTPreTrainedModel):
def __init__(self, config: MobileViTConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mobilevit = MobileViTModel(config, expand_output=False)
self.segmentation_head = MobileViTDeepLabV3(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, SemanticSegmenterOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import requests
>>> import torch
>>> from PIL import Image
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mobilevit/image_processing_mobilevit_fast.py | src/transformers/models/mobilevit/image_processing_mobilevit_fast.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for MobileViT."""
from typing import Optional, Union
import torch
from torchvision.transforms.v2 import functional as F
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
is_torch_tensor,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
)
from .image_processing_mobilevit import MobileVitImageProcessorKwargs
@auto_docstring
class MobileViTImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 256, "width": 256}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = None
do_convert_rgb = None
do_flip_channel_order = True
do_reduce_labels = False
valid_kwargs = MobileVitImageProcessorKwargs
def __init__(self, **kwargs: Unpack[MobileVitImageProcessorKwargs]):
super().__init__(**kwargs)
# Copied from transformers.models.beit.image_processing_beit_fast.BeitImageProcessorFast.reduce_label
def reduce_label(self, labels: list["torch.Tensor"]):
for idx in range(len(labels)):
label = labels[idx]
label = torch.where(label == 0, torch.tensor(255, dtype=label.dtype), label)
label = label - 1
label = torch.where(label == 254, torch.tensor(255, dtype=label.dtype), label)
labels[idx] = label
return labels
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[MobileVitImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[MobileVitImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
images_kwargs = kwargs.copy()
images_kwargs["do_reduce_labels"] = False
batch_feature = self._preprocess(images, **images_kwargs)
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_rescale": False,
"do_flip_channel_order": False,
# Nearest interpolation is used for segmentation maps instead of BILINEAR.
"interpolation": F.InterpolationMode.NEAREST_EXACT,
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
).pixel_values
batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return batch_feature
def _preprocess(
self,
images: list["torch.Tensor"],
do_reduce_labels: bool,
do_resize: bool,
size: Optional[SizeDict],
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: Optional[float],
do_center_crop: bool,
crop_size: Optional[SizeDict],
do_flip_channel_order: bool,
disable_grouping: bool,
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
processed_images = []
if do_reduce_labels:
images = self.reduce_label(images)
# Group images by shape for more efficient batch processing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
# Process each group of images with the same shape
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
# Reorder images to original sequence
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group again after resizing (in case resize produced different sizes)
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(image=stacked_images, size=crop_size)
if do_rescale:
stacked_images = self.rescale(image=stacked_images, scale=rescale_factor)
if do_flip_channel_order:
# For batched images, we need to handle them all at once
if stacked_images.ndim > 3 and stacked_images.shape[1] >= 3:
# Flip RGB → BGR for batched images
flipped = stacked_images.clone()
flipped[:, 0:3] = stacked_images[:, [2, 1, 0], ...]
stacked_images = flipped
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
# Stack all processed images if return_tensors is specified
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`MobileViTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
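# Illustrative sketch (not part of the upstream file): typical use of
# `post_process_semantic_segmentation` with dummy logits. `DummyOutput` is a
# hypothetical stand-in for a model output object carrying `.logits`.
def _post_process_usage_sketch():
    from dataclasses import dataclass
    import torch
    @dataclass
    class DummyOutput:
        logits: torch.Tensor
    processor = MobileViTImageProcessorFast()
    outputs = DummyOutput(logits=torch.randn(2, 21, 32, 32))  # (batch, num_labels, H, W)
    maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(64, 64), (48, 80)])
    assert maps[0].shape == (64, 64) and maps[1].shape == (48, 80)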
__all__ = ["MobileViTImageProcessorFast"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mobilevit/__init__.py | src/transformers/models/mobilevit/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_mobilevit import *
from .feature_extraction_mobilevit import *
from .image_processing_mobilevit import *
from .image_processing_mobilevit_fast import *
from .modeling_mobilevit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mobilevit/configuration_mobilevit.py | src/transformers/models/mobilevit/configuration_mobilevit.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileViT model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class MobileViTConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MobileViTModel`]. It is used to instantiate a
MobileViT model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MobileViT
[apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 256):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 2):
The size (resolution) of each patch.
hidden_sizes (`list[int]`, *optional*, defaults to `[144, 192, 240]`):
Dimensionality (hidden size) of the Transformer encoders at each stage.
neck_hidden_sizes (`list[int]`, *optional*, defaults to `[16, 32, 64, 96, 128, 160, 640]`):
The number of channels for the feature maps of the backbone.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 2.0):
The ratio of the number of channels in the output of the MLP to the number of channels in the input.
expand_ratio (`float`, *optional*, defaults to 4.0):
Expansion factor for the MobileNetv2 layers.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the Transformer encoder and convolution layers.
conv_kernel_size (`int`, *optional*, defaults to 3):
The size of the convolutional kernel in the MobileViT layer.
output_stride (`int`, *optional*, defaults to 32):
The ratio of the spatial resolution of the output to the resolution of the input image.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the Transformer encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for attached classifiers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
aspp_out_channels (`int`, *optional*, defaults to 256):
Number of output channels used in the ASPP layer for semantic segmentation.
atrous_rates (`list[int]`, *optional*, defaults to `[6, 12, 18]`):
Dilation (atrous) factors used in the ASPP layer for semantic segmentation.
aspp_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the ASPP layer for semantic segmentation.
semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
The index that is ignored by the loss function of the semantic segmentation model.
Example:
```python
>>> from transformers import MobileViTConfig, MobileViTModel
>>> # Initializing a mobilevit-small style configuration
>>> configuration = MobileViTConfig()
>>> # Initializing a model from the mobilevit-small style configuration
>>> model = MobileViTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mobilevit"
def __init__(
self,
num_channels=3,
image_size=256,
patch_size=2,
hidden_sizes=[144, 192, 240],
neck_hidden_sizes=[16, 32, 64, 96, 128, 160, 640],
num_attention_heads=4,
mlp_ratio=2.0,
expand_ratio=4.0,
hidden_act="silu",
conv_kernel_size=3,
output_stride=32,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.0,
classifier_dropout_prob=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
qkv_bias=True,
aspp_out_channels=256,
atrous_rates=[6, 12, 18],
aspp_dropout_prob=0.1,
semantic_loss_ignore_index=255,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_sizes = hidden_sizes
self.neck_hidden_sizes = neck_hidden_sizes
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.expand_ratio = expand_ratio
self.hidden_act = hidden_act
self.conv_kernel_size = conv_kernel_size
self.output_stride = output_stride
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
# decode head attributes for semantic segmentation
self.aspp_out_channels = aspp_out_channels
self.atrous_rates = atrous_rates
self.aspp_dropout_prob = aspp_dropout_prob
self.semantic_loss_ignore_index = semantic_loss_ignore_index
__all__ = ["MobileViTConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mobilevit/image_processing_mobilevit.py | src/transformers/models/mobilevit/image_processing_mobilevit.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for MobileViT."""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, get_resize_output_image_size, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import (
TensorType,
filter_out_non_signature_kwargs,
is_torch_available,
is_torch_tensor,
is_vision_available,
logging,
)
from ...utils.import_utils import requires
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileVitImageProcessorKwargs(ImagesKwargs, total=False):
"""
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the color channels from RGB to BGR or vice versa.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
"""
do_flip_channel_order: bool
do_reduce_labels: bool
@requires(backends=("vision",))
class MobileViTImageProcessor(BaseImageProcessor):
r"""
Constructs a MobileViT image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 224}`):
Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
`preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
the `preprocess` method.
crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Desired output size `(size["height"], size["width"])` when applying center-cropping. Can be overridden by
the `crop_size` parameter in the `preprocess` method.
do_flip_channel_order (`bool`, *optional*, defaults to `True`):
Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
parameter in the `preprocess` method.
do_reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
`preprocess` method.
"""
model_input_names = ["pixel_values"]
valid_kwargs = MobileVitImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_flip_channel_order: bool = True,
do_reduce_labels: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
crop_size = get_size_dict(crop_size, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_flip_channel_order = do_flip_channel_order
self.do_reduce_labels = do_reduce_labels
# Copied from transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.resize with PILImageResampling.BICUBIC->PILImageResampling.BILINEAR
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if "shortest_edge" in size:
size = size["shortest_edge"]
default_to_square = False
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(
image,
size=size,
default_to_square=default_to_square,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def flip_channel_order(
self,
image: np.ndarray,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Flip the color channels from RGB to BGR or vice versa.
Args:
image (`np.ndarray`):
The image, represented as a numpy array.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
return flip_channel_order(image, data_format=data_format, input_data_format=input_data_format)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.reduce_label
def reduce_label(self, label: ImageInput) -> np.ndarray:
label = to_numpy_array(label)
# Avoid using underflow conversion
label[label == 0] = 255
label = label - 1
label[label == 254] = 255
return label
def __call__(self, images, segmentation_maps=None, **kwargs):
"""
Preprocesses a batch of images and optionally segmentation maps.
Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be
passed in as positional arguments.
"""
return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)
def _preprocess(
self,
image: ImageInput,
do_reduce_labels: bool,
do_resize: bool,
do_rescale: bool,
do_center_crop: bool,
do_flip_channel_order: bool,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
rescale_factor: Optional[float] = None,
crop_size: Optional[dict[str, int]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
if do_reduce_labels:
image = self.reduce_label(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_flip_channel_order:
image = self.flip_channel_order(image, input_data_format=input_data_format)
return image
def _preprocess_image(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_flip_channel_order: Optional[bool] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single image."""
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
image = self._preprocess(
image=image,
do_reduce_labels=False,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_flip_channel_order=do_flip_channel_order,
input_data_format=input_data_format,
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
def _preprocess_mask(
self,
segmentation_map: ImageInput,
do_reduce_labels: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single mask."""
segmentation_map = to_numpy_array(segmentation_map)
# Add channel dimension if missing - needed for certain transformations
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
segmentation_map = self._preprocess(
image=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
size=size,
resample=PILImageResampling.NEAREST,
do_rescale=False,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_flip_channel_order=False,
input_data_format=input_data_format,
)
# Remove extra channel dimension if added for processing
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
segmentation_map = segmentation_map.astype(np.int64)
return segmentation_map
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[dict[str, int]] = None,
do_flip_channel_order: Optional[bool] = None,
do_reduce_labels: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
segmentation_maps (`ImageInput`, *optional*):
Segmentation map to preprocess.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image by rescale factor.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop if `do_center_crop` is set to `True`.
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the channel order of the image.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
denotes the background, and the background itself is not included among the classes of the dataset (e.g.
ADE20k). The background label will be replaced by 255.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
do_flip_channel_order = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels
images = make_flat_list_of_images(images)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
if segmentation_maps is not None and not valid_images(segmentation_maps):
raise ValueError(
"Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor"
)
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
images = [
self._preprocess_image(
image=img,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_flip_channel_order=do_flip_channel_order,
data_format=data_format,
input_data_format=input_data_format,
)
for img in images
]
data = {"pixel_values": images}
if segmentation_maps is not None:
segmentation_maps = [
self._preprocess_mask(
segmentation_map=segmentation_map,
do_reduce_labels=do_reduce_labels,
do_resize=do_resize,
size=size,
do_center_crop=do_center_crop,
crop_size=crop_size,
input_data_format=input_data_format,
)
for segmentation_map in segmentation_maps
]
data["labels"] = segmentation_maps
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileViT
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`MobileViTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
__all__ = ["MobileViTImageProcessor"]
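# Illustrative usage sketch (not part of the original file): how the processor above is
# typically paired with a segmentation model. The checkpoint name and the local image
# path are assumptions chosen for the example, not requirements of this module.
#
#     from PIL import Image
#     from transformers import MobileViTImageProcessor, MobileViTForSemanticSegmentation
#
#     processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
#     model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
#
#     image = Image.open("example.jpg")  # hypothetical local image
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#
#     # Map the logits back to the original (height, width) and take the per-pixel argmax
#     segmentation_map = processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[image.size[::-1]]  # PIL size is (width, height)
#     )[0]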
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py | src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert MobileViT checkpoints from the ml-cvnets library."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
config = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
config.hidden_sizes = [144, 192, 240]
config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
config.hidden_sizes = [96, 120, 144]
config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
config.hidden_sizes = [64, 80, 96]
config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
config.hidden_dropout_prob = 0.05
config.expand_ratio = 2.0
if mobilevit_name.startswith("deeplabv3_"):
config.image_size = 512
config.output_stride = 16
config.num_labels = 21
filename = "pascal-voc-id2label.json"
else:
config.num_labels = 1000
filename = "imagenet-1k-id2label.json"
repo_id = "huggingface/label-files"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def rename_key(name, base_model=False):
for i in range(1, 6):
if f"layer_{i}." in name:
name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
if "conv_1." in name:
name = name.replace("conv_1.", "conv_stem.")
if ".block." in name:
name = name.replace(".block.", ".")
if "exp_1x1" in name:
name = name.replace("exp_1x1", "expand_1x1")
if "red_1x1" in name:
name = name.replace("red_1x1", "reduce_1x1")
if ".local_rep.conv_3x3." in name:
name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
if ".local_rep.conv_1x1." in name:
name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
if ".norm." in name:
name = name.replace(".norm.", ".normalization.")
if ".conv." in name:
name = name.replace(".conv.", ".convolution.")
if ".conv_proj." in name:
name = name.replace(".conv_proj.", ".conv_projection.")
for i in range(0, 2):
for j in range(0, 4):
if f".{i}.{j}." in name:
name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
for i in range(2, 6):
for j in range(0, 4):
if f".{i}.{j}." in name:
name = name.replace(f".{i}.{j}.", f".{i}.")
if "expand_1x1" in name:
name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
if "conv_3x3" in name:
name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
if "reduce_1x1" in name:
name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
for i in range(2, 5):
if f".global_rep.{i}.weight" in name:
name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
if f".global_rep.{i}.bias" in name:
name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
if ".global_rep." in name:
name = name.replace(".global_rep.", ".transformer.")
if ".pre_norm_mha.0." in name:
name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
if ".pre_norm_mha.1.out_proj." in name:
name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
if ".pre_norm_ffn.0." in name:
name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
if ".pre_norm_ffn.1." in name:
name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
if ".pre_norm_ffn.4." in name:
name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
if ".transformer." in name:
name = name.replace(".transformer.", ".transformer.layer.")
if ".aspp_layer." in name:
name = name.replace(".aspp_layer.", ".")
if ".aspp_pool." in name:
name = name.replace(".aspp_pool.", ".")
if "seg_head." in name:
name = name.replace("seg_head.", "segmentation_head.")
if "segmentation_head.classifier.classifier." in name:
name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
if "classifier.fc." in name:
name = name.replace("classifier.fc.", "classifier.")
elif (not base_model) and ("segmentation_head." not in name):
name = "mobilevit." + name
return name
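# Quick sanity-check sketch (not part of the original script): print how a couple of
# representative ml-cvnets keys are renamed by the rules above. The key names are
# examples chosen for illustration only; by inspection of the rules,
# "conv_1.conv.weight" should become "mobilevit.conv_stem.convolution.weight".
#
#     for original_key in ["conv_1.conv.weight", "classifier.fc.weight"]:
#         print(original_key, "->", rename_key(original_key))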
def convert_state_dict(orig_state_dict, model, base_model=False):
if base_model:
model_prefix = ""
else:
model_prefix = "mobilevit."
for key in orig_state_dict.copy():
val = orig_state_dict.pop(key)
if key[:8] == "encoder.":
key = key[8:]
if "qkv" in key:
key_split = key.split(".")
layer_num = int(key_split[0][6:]) - 1
transformer_num = int(key_split[3])
layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
prefix = (
f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
orig_state_dict[prefix + "query.weight"] = val[:dim, :]
orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
else:
orig_state_dict[prefix + "query.bias"] = val[:dim]
orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
orig_state_dict[prefix + "value.bias"] = val[-dim:]
else:
orig_state_dict[rename_key(key, base_model)] = val
return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
"""
Copy/paste/tweak model's weights to our MobileViT structure.
"""
config = get_mobilevit_config(mobilevit_name)
# load original state_dict
state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_"):
model = MobileViTForSemanticSegmentation(config).eval()
else:
model = MobileViTForImageClassification(config).eval()
new_state_dict = convert_state_dict(state_dict, model)
model.load_state_dict(new_state_dict)
# Check outputs on an image, prepared by MobileViTImageProcessor
image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
encoding = image_processor(images=prepare_img(), return_tensors="pt")
outputs = model(**encoding)
logits = outputs.logits
if mobilevit_name.startswith("deeplabv3_"):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
expected_logits = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
]
)
elif mobilevit_name == "deeplabv3_mobilevit_xs":
expected_logits = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
]
)
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
expected_logits = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
]
)
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model_mapping = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub...")
model_name = model_mapping[mobilevit_name]
image_processor.push_to_hub(model_name, organization="apple")
model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
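# Example invocation (illustrative; the checkpoint and output paths below are
# placeholders, not files shipped with this script):
#
#     python convert_mlcvnets_to_pytorch.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path /path/to/mobilevit_s.pt \
#         --pytorch_dump_folder_path /tmp/mobilevit_s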
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitpose/convert_vitpose_to_hf.py | src/transformers/models/vitpose/convert_vitpose_to_hf.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert VitPose checkpoints from the original repository.
URL: https://github.com/vitae-transformer/vitpose
Notebook to get the original logits: https://colab.research.google.com/drive/1QDX_2POTpl6JaZAV2WIFjuiqDsDwiqMZ?usp=sharing.
"""
import argparse
import os
import re
from typing import Optional
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import VitPoseBackboneConfig, VitPoseConfig, VitPoseForPoseEstimation, VitPoseImageProcessor
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
r"patch_embed.proj": "embeddings.patch_embeddings.projection",
r"pos_embed": "embeddings.position_embeddings",
r"blocks": "encoder.layer",
r"attn.proj": "attention.output.dense",
r"attn": "attention.self",
r"norm1": "layernorm_before",
r"norm2": "layernorm_after",
r"last_norm": "layernorm",
r"keypoint_head": "head",
r"final_layer": "conv",
}
MODEL_TO_FILE_NAME_MAPPING = {
# VitPose models, simple decoder
"vitpose-base-simple": "vitpose-b-simple.pth",
# VitPose models, classic decoder
"vitpose-base": "vitpose-b.pth",
# VitPose models, COCO-AIC-MPII
"vitpose-base-coco-aic-mpii": "vitpose_base_coco_aic_mpii.pth",
# VitPose+ models
"vitpose-plus-small": "vitpose+_small.pth",
"vitpose-plus-base": "vitpose+_base.pth",
"vitpose-plus-large": "vitpose+_large.pth",
"vitpose-plus-huge": "vitpose+_huge.pth",
}
def get_config(model_name):
if "plus" in model_name:
num_experts = 6
if "small" in model_name:
part_features = 96
out_indices = [12]
elif "base" in model_name:
part_features = 192
out_indices = [12]
elif "large" in model_name:
part_features = 256
out_indices = [24]
elif "huge" in model_name:
part_features = 320
out_indices = [32]
else:
raise ValueError(f"Model {model_name} not supported")
else:
num_experts = 1
part_features = 0
# size of the architecture
if "small" in model_name:
hidden_size = 384
num_hidden_layers = 12
num_attention_heads = 12
elif "large" in model_name:
hidden_size = 1024
num_hidden_layers = 24
num_attention_heads = 16
elif "huge" in model_name:
hidden_size = 1280
num_hidden_layers = 32
num_attention_heads = 16
backbone_config = VitPoseBackboneConfig(
out_indices=out_indices,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
num_experts=num_experts,
part_features=part_features,
)
use_simple_decoder = "simple" in model_name
edges = [
[15, 13],
[13, 11],
[16, 14],
[14, 12],
[11, 12],
[5, 11],
[6, 12],
[5, 6],
[5, 7],
[6, 8],
[7, 9],
[8, 10],
[1, 2],
[0, 1],
[0, 2],
[1, 3],
[2, 4],
[3, 5],
[4, 6],
]
id2label = {
0: "Nose",
1: "L_Eye",
2: "R_Eye",
3: "L_Ear",
4: "R_Ear",
5: "L_Shoulder",
6: "R_Shoulder",
7: "L_Elbow",
8: "R_Elbow",
9: "L_Wrist",
10: "R_Wrist",
11: "L_Hip",
12: "R_Hip",
13: "L_Knee",
14: "R_Knee",
15: "L_Ankle",
16: "R_Ankle",
}
label2id = {v: k for k, v in id2label.items()}
config = VitPoseConfig(
backbone_config=backbone_config,
num_labels=17,
use_simple_decoder=use_simple_decoder,
edges=edges,
id2label=id2label,
label2id=label2id,
)
return config
def convert_old_keys_to_new_keys(state_dict_keys: Optional[dict] = None):
"""
This function should be applied only once, on the concatenated keys to efficiently rename using
the key mappings.
"""
output_dict = {}
if state_dict_keys is not None:
old_text = "\n".join(state_dict_keys)
new_text = old_text
for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
if replacement is None:
new_text = re.sub(pattern, "", new_text) # an empty line
continue
new_text = re.sub(pattern, replacement, new_text)
output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
return output_dict
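# Illustrative sketch (not part of the original script): the mapping above is applied
# with re.sub over all keys joined into one string, so by inspection of the rules a key
# such as "blocks.0.attn.proj.weight" is expected to become
# "encoder.layer.0.attention.output.dense.weight" ("blocks" -> "encoder.layer", then
# "attn.proj" -> "attention.output.dense"). The sample keys below are examples only.
#
#     sample_keys = ["blocks.0.attn.proj.weight", "patch_embed.proj.weight"]
#     print(convert_old_keys_to_new_keys(sample_keys))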
# We will verify our results on a COCO image
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000000139.jpg"
image = Image.open(requests.get(url, stream=True).raw)
return image
@torch.no_grad()
def write_model(model_name, model_path, push_to_hub, check_logits=True):
# ------------------------------------------------------------
# Vision model params and config
# ------------------------------------------------------------
# params from config
config = get_config(model_name)
# ------------------------------------------------------------
# Convert weights
# ------------------------------------------------------------
# load original state_dict
filename = MODEL_TO_FILE_NAME_MAPPING[model_name]
print(f"Fetching all parameters from the checkpoint at {filename}...")
checkpoint_path = hf_hub_download(
repo_id="nielsr/vitpose-original-checkpoints", filename=filename, repo_type="model"
)
print("Converting model...")
original_state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["state_dict"]
all_keys = list(original_state_dict.keys())
new_keys = convert_old_keys_to_new_keys(all_keys)
dim = config.backbone_config.hidden_size
state_dict = {}
for key in all_keys:
new_key = new_keys[key]
value = original_state_dict[key]
if re.search("associate_heads", new_key) or re.search("backbone.cls_token", new_key):
# associate_heads belongs to an auxiliary (training-time) head, so it is not needed at inference time.
# backbone.cls_token is only used by an optional forward path that supports dynamic input sizes, see details in https://github.com/ViTAE-Transformer/ViTPose/issues/34
pass
elif re.search("qkv", new_key):
state_dict[new_key.replace("self.qkv", "attention.query")] = value[:dim]
state_dict[new_key.replace("self.qkv", "attention.key")] = value[dim : dim * 2]
state_dict[new_key.replace("self.qkv", "attention.value")] = value[-dim:]
elif re.search("head", new_key) and not config.use_simple_decoder:
# Pattern for deconvolution layers
deconv_pattern = r"deconv_layers\.(0|3)\.weight"
new_key = re.sub(deconv_pattern, lambda m: f"deconv{int(m.group(1)) // 3 + 1}.weight", new_key)
# Pattern for batch normalization layers
bn_patterns = [
(r"deconv_layers\.(\d+)\.weight", r"batchnorm\1.weight"),
(r"deconv_layers\.(\d+)\.bias", r"batchnorm\1.bias"),
(r"deconv_layers\.(\d+)\.running_mean", r"batchnorm\1.running_mean"),
(r"deconv_layers\.(\d+)\.running_var", r"batchnorm\1.running_var"),
(r"deconv_layers\.(\d+)\.num_batches_tracked", r"batchnorm\1.num_batches_tracked"),
]
for pattern, replacement in bn_patterns:
if re.search(pattern, new_key):
# Convert the layer number to the correct batch norm index
layer_num = int(re.search(pattern, key).group(1))
bn_num = layer_num // 3 + 1
new_key = re.sub(pattern, replacement.replace(r"\1", str(bn_num)), new_key)
state_dict[new_key] = value
else:
state_dict[new_key] = value
print("Loading the checkpoint in a Vitpose model.")
model = VitPoseForPoseEstimation(config)
model.eval()
model.load_state_dict(state_dict)
print("Checkpoint loaded successfully.")
# create image processor
image_processor = VitPoseImageProcessor()
# verify image processor
image = prepare_img()
boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]]
pixel_values = image_processor(images=image, boxes=boxes, return_tensors="pt").pixel_values
filepath = hf_hub_download(repo_id="nielsr/test-image", filename="vitpose_batch_data.pt", repo_type="dataset")
original_pixel_values = torch.load(filepath, map_location="cpu", weights_only=True)["img"]
# we allow for a small difference in the pixel values due to the original repository using cv2
assert torch.allclose(pixel_values, original_pixel_values, atol=1e-1)
dataset_index = torch.tensor([0])
with torch.no_grad():
print("Shape of original_pixel_values: ", original_pixel_values.shape)
print("First values of original_pixel_values: ", original_pixel_values[0, 0, :3, :3])
# first forward pass
outputs = model(original_pixel_values, dataset_index=dataset_index)
output_heatmap = outputs.heatmaps
print("Shape of output_heatmap: ", output_heatmap.shape)
print("First values: ", output_heatmap[0, 0, :3, :3])
# second forward pass (flipped)
# this is done since the model uses `flip_test=True` in its test config
original_pixel_values_flipped = torch.flip(original_pixel_values, [3])
outputs_flipped = model(
original_pixel_values_flipped,
dataset_index=dataset_index,
flip_pairs=torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]),
)
output_flipped_heatmap = outputs_flipped.heatmaps
outputs.heatmaps = (output_heatmap + output_flipped_heatmap) * 0.5
# Verify pose_results
pose_results = image_processor.post_process_pose_estimation(outputs, boxes=boxes)[0]
if check_logits:
# Simple decoder checkpoints
if model_name == "vitpose-base-simple":
assert torch.allclose(
pose_results[1]["keypoints"][0],
torch.tensor([3.98180511e02, 1.81808380e02]),
atol=5e-2,
)
assert torch.allclose(
pose_results[1]["scores"][0],
torch.tensor([8.66642594e-01]),
atol=5e-2,
)
# Classic decoder checkpoints
elif model_name == "vitpose-base":
assert torch.allclose(
pose_results[1]["keypoints"][0],
torch.tensor([3.9807913e02, 1.8182812e02]),
atol=5e-2,
)
assert torch.allclose(
pose_results[1]["scores"][0],
torch.tensor([8.8235235e-01]),
atol=5e-2,
)
# COCO-AIC-MPII checkpoints
elif model_name == "vitpose-base-coco-aic-mpii":
assert torch.allclose(
pose_results[1]["keypoints"][0],
torch.tensor([3.98305542e02, 1.81741592e02]),
atol=5e-2,
)
assert torch.allclose(
pose_results[1]["scores"][0],
torch.tensor([8.69966745e-01]),
atol=5e-2,
)
# VitPose+ models
elif model_name == "vitpose-plus-small":
assert torch.allclose(
pose_results[1]["keypoints"][0],
torch.tensor([398.1597, 181.6902]),
atol=5e-2,
)
assert torch.allclose(
pose_results[1]["scores"][0],
torch.tensor(0.9051),
atol=5e-2,
)
elif model_name == "vitpose-plus-base":
assert torch.allclose(
pose_results[1]["keypoints"][0],
torch.tensor([3.98201294e02, 1.81728302e02]),
atol=5e-2,
)
assert torch.allclose(
pose_results[1]["scores"][0],
torch.tensor([8.75046968e-01]),
atol=5e-2,
)
elif model_name == "vitpose-plus-large":
assert torch.allclose(
pose_results[1]["keypoints"][0],
torch.tensor([398.1409, 181.7412]),
atol=5e-2,
)
assert torch.allclose(
pose_results[1]["scores"][0],
torch.tensor(0.8746),
atol=5e-2,
)
elif model_name == "vitpose-plus-huge":
assert torch.allclose(
pose_results[1]["keypoints"][0],
torch.tensor([398.2079, 181.8026]),
atol=5e-2,
)
assert torch.allclose(
pose_results[1]["scores"][0],
torch.tensor(0.8693),
atol=5e-2,
)
else:
raise ValueError("Model not supported")
print("Conversion successfully done.")
if model_path is not None:
os.makedirs(model_path, exist_ok=True)
model.save_pretrained(model_path)
image_processor.save_pretrained(model_path)
if push_to_hub:
print(f"Pushing model and image processor for {model_name} to hub")
# we created a community organization on the hub for this model
# maintained by the Transformers team
model.push_to_hub(f"usyd-community/{model_name}")
image_processor.push_to_hub(f"usyd-community/{model_name}")
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="vitpose-base-simple",
choices=MODEL_TO_FILE_NAME_MAPPING.keys(),
type=str,
help="Name of the VitPose model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to store the converted model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model to the Hugging Face hub.",
)
parser.add_argument(
"--check_logits", action="store_false", help="Whether or not to verify the logits of the converted model."
)
args = parser.parse_args()
write_model(
model_path=args.pytorch_dump_folder_path,
model_name=args.model_name,
push_to_hub=args.push_to_hub,
check_logits=args.check_logits,
)
if __name__ == "__main__":
main()
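# Example invocation (illustrative; the output path is a placeholder):
#
#     python convert_vitpose_to_hf.py \
#         --model_name vitpose-base-simple \
#         --pytorch_dump_folder_path /tmp/vitpose-base-simple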
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitpose/image_processing_vitpose_fast.py | src/transformers/models/vitpose/image_processing_vitpose_fast.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for VitPose."""
import itertools
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
import torch
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_transforms import group_images_by_shape, reorder_images
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ImageInput, SizeDict
from ...processing_utils import Unpack
from ...utils import TensorType, auto_docstring
from .image_processing_vitpose import (
VitPoseImageProcessorKwargs,
box_to_center_and_scale,
coco_to_pascal_voc,
get_keypoint_predictions,
get_warp_matrix,
post_dark_unbiased_data_processing,
scipy_warp_affine,
transform_preds,
)
if TYPE_CHECKING:
from .modeling_vitpose import VitPoseEstimatorOutput
@auto_docstring
class VitPoseImageProcessorFast(BaseImageProcessorFast):
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 256, "width": 192}
do_rescale = True
do_normalize = True
do_affine_transform = True
normalize_factor = 200.0
valid_kwargs = VitPoseImageProcessorKwargs
model_input_names = ["pixel_values"]
def torch_affine_transform(
self,
image: torch.Tensor,
center: tuple[float],
scale: tuple[float],
rotation: float,
size: SizeDict,
) -> torch.Tensor:
"""
Apply an affine transformation to a torch tensor image.
Args:
image (`torch.Tensor`):
Image tensor of shape (C, H, W) to transform.
center (`tuple[float]`):
Center of the bounding box (x, y).
scale (`tuple[float]`):
Scale of the bounding box with respect to height/width.
rotation (`float`):
Rotation angle in degrees.
size (`SizeDict`):
Size of the destination image.
Returns:
`torch.Tensor`: The transformed image.
"""
transformation = get_warp_matrix(
rotation, center * 2.0, np.array((size.width, size.height)) - 1.0, scale * 200.0
)
# Convert tensor to numpy (channels last) for scipy_warp_affine
image_np = image.permute(1, 2, 0).cpu().numpy()
transformed_np = scipy_warp_affine(src=image_np, M=transformation, size=(size.height, size.width))
# Convert back to torch tensor (channels first)
transformed = torch.from_numpy(transformed_np).permute(2, 0, 1).to(image.device)
return transformed
@auto_docstring
def preprocess(
self,
images: ImageInput,
boxes: Union[list[list[float]], np.ndarray],
**kwargs: Unpack[VitPoseImageProcessorKwargs],
) -> BatchFeature:
r"""
boxes (`list[list[list[float]]]` or `np.ndarray`):
List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the
bounding box coordinates in COCO format (top_left_x, top_left_y, width, height).
"""
return super().preprocess(images, boxes, **kwargs)
def _preprocess(
self,
images: list[torch.Tensor],
boxes: Union[list, np.ndarray],
do_affine_transform: bool,
size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Union[float, tuple[float]],
image_std: Union[float, tuple[float]],
disable_grouping: bool,
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
"""
Preprocess images with affine transformations based on bounding boxes.
"""
if len(images) != len(boxes):
raise ValueError(f"Number of images and boxes must match: {len(images)} != {len(boxes)}")
# Apply affine transformation for each image and each box
if do_affine_transform:
transformed_images = []
for image, image_boxes in zip(images, boxes):
for box in image_boxes:
center, scale = box_to_center_and_scale(
box,
image_width=size.width,
image_height=size.height,
normalize_factor=self.normalize_factor,
)
transformed_image = self.torch_affine_transform(image, center, scale, rotation=0, size=size)
transformed_images.append(transformed_image)
images = transformed_images
# Group images by shape for efficient batch processing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Apply rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
# Stack into batch tensor
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def keypoints_from_heatmaps(
self,
heatmaps: np.ndarray,
center: np.ndarray,
scale: np.ndarray,
kernel: int = 11,
):
"""
Get final keypoint predictions from heatmaps and transform them back to
the image.
Args:
heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width])`):
Model predicted heatmaps.
center (`np.ndarray` of shape `(batch_size, 2)`):
Center of the bounding box (x, y).
scale (`np.ndarray` of shape `(batch_size, 2)`):
Scale of the bounding box with respect to the original image width and height.
kernel (int, *optional*, defaults to 11):
Gaussian kernel size (K) for modulation, which should match the heatmap gaussian sigma when training.
K=17 for sigma=3 and k=11 for sigma=2.
Returns:
tuple: A tuple containing keypoint predictions and scores.
- preds (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`):
Predicted keypoint location in images.
- scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`):
Scores (confidence) of the keypoints.
"""
batch_size, _, height, width = heatmaps.shape
coords, scores = get_keypoint_predictions(heatmaps)
preds = post_dark_unbiased_data_processing(coords, heatmaps, kernel=kernel)
# Transform back to the image
for i in range(batch_size):
preds[i] = transform_preds(preds[i], center=center[i], scale=scale[i], output_size=[height, width])
return preds, scores
def post_process_pose_estimation(
self,
outputs: "VitPoseEstimatorOutput",
boxes: Union[list[list[list[float]]], np.ndarray],
kernel_size: int = 11,
threshold: Optional[float] = None,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
"""
Transform the heatmaps into keypoint predictions and transform them back to the image.
Args:
outputs (`VitPoseEstimatorOutput`):
VitPoseForPoseEstimation model outputs.
boxes (`list[list[list[float]]]` or `np.ndarray`):
List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding
box coordinates in COCO format (top_left_x, top_left_y, width, height).
kernel_size (`int`, *optional*, defaults to 11):
Gaussian kernel size (K) for modulation.
threshold (`float`, *optional*, defaults to None):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will be resized with the default value.
Returns:
`list[list[Dict]]`: A list of dictionaries, each dictionary containing the keypoints and boxes for an image
in the batch as predicted by the model.
"""
# First compute centers and scales for each bounding box
batch_size, num_keypoints, _, _ = outputs.heatmaps.shape
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
centers = np.zeros((batch_size, 2), dtype=np.float32)
scales = np.zeros((batch_size, 2), dtype=np.float32)
flattened_boxes = list(itertools.chain(*boxes))
for i in range(batch_size):
if target_sizes is not None:
image_width, image_height = target_sizes[i][0], target_sizes[i][1]
scale_factor = np.array([image_width, image_height, image_width, image_height])
flattened_boxes[i] = flattened_boxes[i] * scale_factor
width, height = self.size["width"], self.size["height"]
center, scale = box_to_center_and_scale(flattened_boxes[i], image_width=width, image_height=height)
centers[i, :] = center
scales[i, :] = scale
preds, scores = self.keypoints_from_heatmaps(
outputs.heatmaps.cpu().numpy(), centers, scales, kernel=kernel_size
)
all_boxes = np.zeros((batch_size, 4), dtype=np.float32)
all_boxes[:, 0:2] = centers[:, 0:2]
all_boxes[:, 2:4] = scales[:, 0:2]
poses = torch.tensor(preds)
scores = torch.tensor(scores)
labels = torch.arange(0, num_keypoints)
bboxes_xyxy = torch.tensor(coco_to_pascal_voc(all_boxes))
results: list[list[dict[str, torch.Tensor]]] = []
pose_bbox_pairs = zip(poses, scores, bboxes_xyxy)
for image_bboxes in boxes:
image_results: list[dict[str, torch.Tensor]] = []
for _ in image_bboxes:
# Unpack the next pose and bbox_xyxy from the iterator
pose, score, bbox_xyxy = next(pose_bbox_pairs)
score = score.squeeze()
keypoints_labels = labels
if threshold is not None:
keep = score > threshold
pose = pose[keep]
score = score[keep]
keypoints_labels = keypoints_labels[keep]
pose_result = {"keypoints": pose, "scores": score, "labels": keypoints_labels, "bbox": bbox_xyxy}
image_results.append(pose_result)
results.append(image_results)
return results
__all__ = ["VitPoseImageProcessorFast"]
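# Illustrative usage sketch (not part of the original file). The checkpoint name follows
# the community organization mentioned in this model's documentation; the image path and
# the person bounding box are assumptions for the example. Boxes are given in COCO
# format (top_left_x, top_left_y, width, height), one list of boxes per image.
#
#     import torch
#     from PIL import Image
#     from transformers import VitPoseForPoseEstimation
#
#     processor = VitPoseImageProcessorFast()  # defaults defined in the class above
#     model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple")
#
#     image = Image.open("person.jpg")  # hypothetical local image
#     boxes = [[[412.8, 157.61, 53.05, 138.01]]]  # one person box for the single image
#
#     inputs = processor(images=image, boxes=boxes, return_tensors="pt")
#     dataset_index = torch.tensor([0])  # mirrors the conversion script; may be optional for single-expert checkpoints
#     with torch.no_grad():
#         outputs = model(**inputs, dataset_index=dataset_index)
#
#     pose_results = processor.post_process_pose_estimation(outputs, boxes=boxes)
#     keypoints = pose_results[0][0]["keypoints"]  # (num_keypoints, 2) in image coordinates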
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitpose/configuration_vitpose.py | src/transformers/models/vitpose/configuration_vitpose.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VitPose model configuration"""
from typing import Optional
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto.configuration_auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class VitPoseConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VitPoseForPoseEstimation`]. It is used to instantiate a
VitPose model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VitPose
[usyd-community/vitpose-base-simple](https://huggingface.co/usyd-community/vitpose-base-simple) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
backbone_config (`Union[dict, "PreTrainedConfig"]`, *optional*, defaults to `VitPoseBackboneConfig()`):
The configuration of the backbone model. Currently, only `backbone_config` with `vitpose_backbone` as `model_type` is supported.
backbone (`str`, *optional*):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
Whether to use pretrained weights for the backbone.
use_timm_backbone (`bool`, *optional*, defaults to `False`):
Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
library.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_factor (`int`, *optional*, defaults to 4):
Factor to upscale the feature maps coming from the ViT backbone.
use_simple_decoder (`bool`, *optional*, defaults to `True`):
Whether to use a `VitPoseSimpleDecoder` to decode the feature maps from the backbone into heatmaps. Otherwise it uses `VitPoseClassicDecoder`.
Example:
```python
>>> from transformers import VitPoseConfig, VitPoseForPoseEstimation
>>> # Initializing a VitPose configuration
>>> configuration = VitPoseConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = VitPoseForPoseEstimation(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vitpose"
sub_configs = {"backbone_config": AutoConfig}
def __init__(
self,
backbone_config: Optional[PreTrainedConfig] = None,
backbone: Optional[str] = None,
use_pretrained_backbone: bool = False,
use_timm_backbone: bool = False,
backbone_kwargs: Optional[dict] = None,
initializer_range: float = 0.02,
scale_factor: int = 4,
use_simple_decoder: bool = True,
**kwargs,
):
if use_pretrained_backbone:
logger.info(
"`use_pretrained_backbone` is `True`. For pure inference with VitPose weights, do not set this value."
)
if use_timm_backbone:
raise ValueError("Setting `use_timm_backbone` to `True` is not supported at the moment.")
if backbone_config is None and backbone is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `VitPose` backbone.")
backbone_config = CONFIG_MAPPING["vitpose_backbone"](out_indices=[4])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.backbone_config = backbone_config
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = use_timm_backbone
self.backbone_kwargs = backbone_kwargs
self.initializer_range = initializer_range
self.scale_factor = scale_factor
self.use_simple_decoder = use_simple_decoder
super().__init__(**kwargs)
__all__ = ["VitPoseConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitpose/image_processing_vitpose.py | src/transformers/models/vitpose/image_processing_vitpose.py | # coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for VitPose."""
import itertools
import math
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import to_channel_dimension_format
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
infer_channel_dimension_format,
is_scaled_image,
make_flat_list_of_images,
to_numpy_array,
valid_images,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, is_scipy_available, is_torch_available, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
if is_scipy_available():
from scipy.linalg import inv
from scipy.ndimage import affine_transform, gaussian_filter
if TYPE_CHECKING:
from .modeling_vitpose import VitPoseEstimatorOutput
logger = logging.get_logger(__name__)
class VitPoseImageProcessorKwargs(ImagesKwargs, total=False):
r"""
do_affine_transform (`bool`, *optional*):
Whether to apply an affine transformation to the input images based on the bounding boxes.
normalize_factor (`float`, *optional*, defaults to `200.0`):
Width and height scale factor used for normalization when computing center and scale from bounding boxes.
"""
do_affine_transform: Optional[bool]
normalize_factor: Optional[float]
# inspired by https://github.com/ViTAE-Transformer/ViTPose/blob/d5216452796c90c6bc29f5c5ec0bdba94366768a/mmpose/datasets/datasets/base/kpt_2d_sview_rgb_img_top_down_dataset.py#L132
def box_to_center_and_scale(
box: Union[tuple, list, np.ndarray],
image_width: int,
image_height: int,
normalize_factor: float = 200.0,
padding_factor: float = 1.25,
):
"""
Encodes a bounding box in COCO format into (center, scale).
Args:
box (`Tuple`, `List`, or `np.ndarray`):
Bounding box in COCO format (top_left_x, top_left_y, width, height).
image_width (`int`):
Image width.
image_height (`int`):
Image height.
normalize_factor (`float`):
Width and height scale factor.
padding_factor (`float`):
Bounding box padding factor.
Returns:
tuple: A tuple containing center and scale.
- `np.ndarray` [float32](2,): Center of the bbox (x, y).
- `np.ndarray` [float32](2,): Scale of the bbox width & height.
"""
top_left_x, top_left_y, width, height = box[:4]
aspect_ratio = image_width / image_height
center = np.array([top_left_x + width * 0.5, top_left_y + height * 0.5], dtype=np.float32)
if width > aspect_ratio * height:
height = width * 1.0 / aspect_ratio
elif width < aspect_ratio * height:
width = height * aspect_ratio
scale = np.array([width / normalize_factor, height / normalize_factor], dtype=np.float32)
scale = scale * padding_factor
return center, scale
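# Worked example (illustrative, not part of the original file), using the processor's
# default 192x256 input size and the default normalize_factor / padding_factor:
#
#     center, scale = box_to_center_and_scale(
#         [50.0, 100.0, 80.0, 160.0], image_width=192, image_height=256
#     )
#     # aspect_ratio = 192 / 256 = 0.75; the 80x160 box is narrower than that, so the
#     # width is widened to 160 * 0.75 = 120 before normalization:
#     # center == [90.0, 180.0]
#     # scale  == [120 / 200 * 1.25, 160 / 200 * 1.25] == [0.75, 1.0]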
def coco_to_pascal_voc(bboxes: np.ndarray) -> np.ndarray:
"""
Converts bounding boxes from the COCO format to the Pascal VOC format.
In other words, converts from (top_left_x, top_left_y, width, height) format
to (top_left_x, top_left_y, bottom_right_x, bottom_right_y).
Args:
bboxes (`np.ndarray` of shape `(batch_size, 4)`):
Bounding boxes in COCO format.
Returns:
`np.ndarray` of shape `(batch_size, 4)` in Pascal VOC format.
"""
bboxes[:, 2] = bboxes[:, 2] + bboxes[:, 0] - 1
bboxes[:, 3] = bboxes[:, 3] + bboxes[:, 1] - 1
return bboxes
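# Small illustrative example (not part of the original file): a COCO box
# (top_left_x=10, top_left_y=20, width=30, height=40) becomes the Pascal VOC box
# (10, 20, 39, 59). Note that the conversion is done in place on the input array.
#
#     boxes = np.array([[10.0, 20.0, 30.0, 40.0]])
#     print(coco_to_pascal_voc(boxes))  # [[10. 20. 39. 59.]]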
def get_keypoint_predictions(heatmaps: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""Get keypoint predictions from score maps.
Args:
heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`):
Model predicted heatmaps.
Returns:
tuple: A tuple containing aggregated results.
- coords (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`):
Predicted keypoint location.
- scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`):
Scores (confidence) of the keypoints.
"""
if not isinstance(heatmaps, np.ndarray):
raise TypeError("Heatmaps should be np.ndarray")
if heatmaps.ndim != 4:
raise ValueError("Heatmaps should be 4-dimensional")
batch_size, num_keypoints, _, width = heatmaps.shape
heatmaps_reshaped = heatmaps.reshape((batch_size, num_keypoints, -1))
idx = np.argmax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))
scores = np.amax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = preds[:, :, 0] % width
preds[:, :, 1] = preds[:, :, 1] // width
preds = np.where(np.tile(scores, (1, 1, 2)) > 0.0, preds, -1)
return preds, scores
def post_dark_unbiased_data_processing(coords: np.ndarray, batch_heatmaps: np.ndarray, kernel: int = 3) -> np.ndarray:
"""DARK post-processing, implemented with unbiased data processing.
Paper references:
- Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
- Zhang et al. Distribution-Aware Coordinate Representation for Human Pose Estimation (CVPR 2020).
Args:
coords (`np.ndarray` of shape `(num_persons, num_keypoints, 2)`):
Initial coordinates of human pose.
batch_heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`):
Batched heatmaps as predicted by the model.
A batch_size of 1 is used for the bottom up paradigm where all persons share the same heatmap.
A batch_size of `num_persons` is used for the top down paradigm where each person has its own heatmaps.
kernel (`int`, *optional*, defaults to 3):
Gaussian kernel size (K) for modulation.
Returns:
`np.ndarray` of shape `(num_persons, num_keypoints, 2)`:
Refined coordinates.
"""
batch_size, num_keypoints, height, width = batch_heatmaps.shape
num_coords = coords.shape[0]
if not (batch_size == 1 or batch_size == num_coords):
raise ValueError("The batch size of heatmaps should be 1 or equal to the batch size of coordinates.")
radius = int((kernel - 1) // 2)
batch_heatmaps = np.array(
[
[gaussian_filter(heatmap, sigma=0.8, radius=(radius, radius), axes=(0, 1)) for heatmap in heatmaps]
for heatmaps in batch_heatmaps
]
)
batch_heatmaps = np.clip(batch_heatmaps, 0.001, 50)
batch_heatmaps = np.log(batch_heatmaps)
batch_heatmaps_pad = np.pad(batch_heatmaps, ((0, 0), (0, 0), (1, 1), (1, 1)), mode="edge").flatten()
# calculate indices for coordinates
index = coords[..., 0] + 1 + (coords[..., 1] + 1) * (width + 2)
index += (width + 2) * (height + 2) * np.arange(0, batch_size * num_keypoints).reshape(-1, num_keypoints)
index = index.astype(int).reshape(-1, 1)
i_ = batch_heatmaps_pad[index]
ix1 = batch_heatmaps_pad[index + 1]
iy1 = batch_heatmaps_pad[index + width + 2]
ix1y1 = batch_heatmaps_pad[index + width + 3]
ix1_y1_ = batch_heatmaps_pad[index - width - 3]
ix1_ = batch_heatmaps_pad[index - 1]
iy1_ = batch_heatmaps_pad[index - 2 - width]
# calculate refined coordinates using Newton's method
dx = 0.5 * (ix1 - ix1_)
dy = 0.5 * (iy1 - iy1_)
derivative = np.concatenate([dx, dy], axis=1)
derivative = derivative.reshape(num_coords, num_keypoints, 2, 1)
dxx = ix1 - 2 * i_ + ix1_
dyy = iy1 - 2 * i_ + iy1_
dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_)
hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1)
hessian = hessian.reshape(num_coords, num_keypoints, 2, 2)
hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2))
coords -= np.einsum("ijmn,ijnk->ijmk", hessian, derivative).squeeze()
return coords
def transform_preds(coords: np.ndarray, center: np.ndarray, scale: np.ndarray, output_size: np.ndarray) -> np.ndarray:
"""Get final keypoint predictions from heatmaps and apply scaling and
translation to map them back to the image.
Note:
num_keypoints: K
Args:
coords (`np.ndarray` of shape `(num_keypoints, ndims)`):
* If ndims=2, coords are the predicted keypoint locations.
* If ndims=4, coords are composed of (x, y, scores, tags).
* If ndims=5, coords are composed of (x, y, scores, tags,
flipped_tags).
center (`np.ndarray` of shape `(2,)`):
Center of the bounding box (x, y).
scale (`np.ndarray` of shape `(2,)`):
Scale of the bounding box with respect to the original image width and height.
output_size (`np.ndarray` of shape `(2,)`):
Size of the destination heatmaps in (height, width) format.
Returns:
np.ndarray: Predicted coordinates in the images.
"""
if coords.shape[1] not in (2, 4, 5):
raise ValueError("Coordinates need to have either 2, 4 or 5 dimensions.")
if len(center) != 2:
raise ValueError("Center needs to have 2 elements, one for x and one for y.")
if len(scale) != 2:
raise ValueError("Scale needs to consist of a width and height")
if len(output_size) != 2:
raise ValueError("Output size needs to consist of a height and width")
# Recover the scale which is normalized by a factor of 200.
scale = scale * 200.0
# We use unbiased data processing
scale_y = scale[1] / (output_size[0] - 1.0)
scale_x = scale[0] / (output_size[1] - 1.0)
target_coords = np.ones_like(coords)
target_coords[:, 0] = coords[:, 0] * scale_x + center[0] - scale[0] * 0.5
target_coords[:, 1] = coords[:, 1] * scale_y + center[1] - scale[1] * 0.5
return target_coords
def get_warp_matrix(theta: float, size_input: np.ndarray, size_dst: np.ndarray, size_target: np.ndarray):
"""
Calculate the transformation matrix under the constraint of unbiased. Paper ref: Huang et al. The Devil is in the
Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Source: https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py
Args:
theta (`float`):
Rotation angle in degrees.
size_input (`np.ndarray`):
Size of input image [width, height].
size_dst (`np.ndarray`):
Size of output image [width, height].
size_target (`np.ndarray`):
Size of ROI in input plane [w, h].
Returns:
`np.ndarray`: A matrix for transformation.
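Example (an illustrative sketch; the sizes below are made-up values, not taken from a real model):
```python
>>> import numpy as np
>>> matrix = get_warp_matrix(
...     theta=0.0,
...     size_input=np.array([640.0, 480.0]),
...     size_dst=np.array([191.0, 255.0]),
...     size_target=np.array([256.0, 256.0]),
... )
>>> matrix.shape
(2, 3)
```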
"""
theta = np.deg2rad(theta)
matrix = np.zeros((2, 3), dtype=np.float32)
scale_x = size_dst[0] / size_target[0]
scale_y = size_dst[1] / size_target[1]
matrix[0, 0] = math.cos(theta) * scale_x
matrix[0, 1] = -math.sin(theta) * scale_x
matrix[0, 2] = scale_x * (
-0.5 * size_input[0] * math.cos(theta) + 0.5 * size_input[1] * math.sin(theta) + 0.5 * size_target[0]
)
matrix[1, 0] = math.sin(theta) * scale_y
matrix[1, 1] = math.cos(theta) * scale_y
matrix[1, 2] = scale_y * (
-0.5 * size_input[0] * math.sin(theta) - 0.5 * size_input[1] * math.cos(theta) + 0.5 * size_target[1]
)
return matrix
def scipy_warp_affine(src, M, size):
"""
This function implements the cv2.warpAffine function using scipy's affine_transform. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.affine_transform.html and https://docs.opencv.org/4.x/d4/d61/tutorial_warp_affine.html for more details.
Note: the original implementation of cv2.warpAffine uses cv2.INTER_LINEAR.
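Example (a minimal sketch with a random channels-last image and an identity affine matrix, both assumptions):
```python
>>> import numpy as np
>>> src = np.random.rand(256, 192, 3)                 # (height, width, channels)
>>> M = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])  # 2x3 identity affine matrix
>>> warped = scipy_warp_affine(src, M, size=(256, 192))
>>> warped.shape
(256, 192, 3)
```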
"""
channels = [src[..., i] for i in range(src.shape[-1])]
# Convert to a 3x3 matrix used by SciPy
M_scipy = np.vstack([M, [0, 0, 1]])
# If you have a matrix for the ‘push’ transformation, use its inverse (numpy.linalg.inv) in this function.
M_inv = inv(M_scipy)
M_inv[0, 0], M_inv[0, 1], M_inv[1, 0], M_inv[1, 1], M_inv[0, 2], M_inv[1, 2] = (
M_inv[1, 1],
M_inv[1, 0],
M_inv[0, 1],
M_inv[0, 0],
M_inv[1, 2],
M_inv[0, 2],
)
new_src = [affine_transform(channel, M_inv, output_shape=size, order=1) for channel in channels]
new_src = np.stack(new_src, axis=-1)
return new_src
class VitPoseImageProcessor(BaseImageProcessor):
r"""
Constructs a VitPose image processor.
Args:
do_affine_transform (`bool`, *optional*, defaults to `True`):
Whether to apply an affine transformation to the input images.
size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 192}`):
Resolution of the image after `affine_transform` is applied. Only has an effect if `do_affine_transform` is set to `True`. Can
be overridden by `size` in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.).
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`list[float]`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images.
image_std (`list[float]`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images.
"""
valid_kwargs = VitPoseImageProcessorKwargs
model_input_names = ["pixel_values"]
def __init__(
self,
do_affine_transform: bool = True,
size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.do_affine_transform = do_affine_transform
self.size = size if size is not None else {"height": 256, "width": 192}
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.normalize_factor = 200.0
def affine_transform(
self,
image: np.ndarray,
center: tuple[float],
scale: tuple[float],
rotation: float,
size: dict[str, int],
data_format: Optional[ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Apply an affine transformation to an image.
Args:
image (`np.ndarray`):
Image to transform.
center (`tuple[float]`):
Center of the bounding box (x, y).
scale (`tuple[float]`):
Scale of the bounding box with respect to height/width.
rotation (`float`):
Rotation angle in degrees.
size (`dict[str, int]`):
Size of the destination image.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format of the output image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image.
"""
data_format = input_data_format if data_format is None else data_format
size = (size["width"], size["height"])
# the bounding box scale is normalized by a pixel standard deviation of 200 pixels, hence the factor of 200.0
transformation = get_warp_matrix(rotation, center * 2.0, np.array(size) - 1.0, scale * 200.0)
# input image requires channels last format
image = (
image
if input_data_format == ChannelDimension.LAST
else to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)
)
image = scipy_warp_affine(src=image, M=transformation, size=(size[1], size[0]))
image = to_channel_dimension_format(image, data_format, ChannelDimension.LAST)
return image
def preprocess(
self,
images: ImageInput,
boxes: Union[list[list[list[float]]], np.ndarray],
do_affine_transform: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
boxes (`list[list[list[float]]]` or `np.ndarray`):
List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding
box coordinates in COCO format (top_left_x, top_left_y, width, height).
do_affine_transform (`bool`, *optional*, defaults to `self.do_affine_transform`):
Whether to apply an affine transformation to the input images.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
width).
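Example (a usage sketch mirroring the example in the `VitPoseForPoseEstimation` docstring; the checkpoint and box
values are illustrative, and the output shape assumes the default size of 256x192):
```python
>>> from transformers import AutoImageProcessor
>>> from PIL import Image
>>> import requests
>>> processor = AutoImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> boxes = [[[412.8, 157.61, 53.05, 138.01]]]
>>> inputs = processor(image, boxes=boxes, return_tensors="pt")
>>> inputs["pixel_values"].shape
torch.Size([1, 3, 256, 192])
```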
"""
do_affine_transform = do_affine_transform if do_affine_transform is not None else self.do_affine_transform
size = size if size is not None else self.size
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
if isinstance(boxes, list) and len(images) != len(boxes):
raise ValueError(f"Batch of images and boxes mismatch : {len(images)} != {len(boxes)}")
elif isinstance(boxes, np.ndarray) and len(images) != boxes.shape[0]:
raise ValueError(f"Batch of images and boxes mismatch : {len(images)} != {boxes.shape[0]}")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
# transformations (affine transformation + rescaling + normalization)
if self.do_affine_transform:
new_images = []
for image, image_boxes in zip(images, boxes):
for box in image_boxes:
center, scale = box_to_center_and_scale(
box,
image_width=size["width"],
image_height=size["height"],
normalize_factor=self.normalize_factor,
)
transformed_image = self.affine_transform(
image, center, scale, rotation=0, size=size, input_data_format=input_data_format
)
new_images.append(transformed_image)
images = new_images
# For batch processing, the number of boxes must be consistent across all images in the batch.
# When using a list input, the number of boxes can vary dynamically per image.
# The image processor creates pixel_values of shape (batch_size*num_persons, num_channels, height, width)
all_images = []
for image in images:
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
data = {"pixel_values": images}
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
return encoded_inputs
def keypoints_from_heatmaps(
self,
heatmaps: np.ndarray,
center: np.ndarray,
scale: np.ndarray,
kernel: int = 11,
):
"""
Get final keypoint predictions from heatmaps and transform them back to
the image.
Args:
heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width])`):
Model predicted heatmaps.
center (`np.ndarray` of shape `(batch_size, 2)`):
Center of the bounding box (x, y).
scale (`np.ndarray` of shape `(batch_size, 2)`):
Scale of the bounding box wrt original images of width and height.
kernel (int, *optional*, defaults to 11):
Gaussian kernel size (K) for modulation, which should match the heatmap gaussian sigma when training.
K=17 for sigma=3 and K=11 for sigma=2.
Returns:
tuple: A tuple containing keypoint predictions and scores.
- preds (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`):
Predicted keypoint location in images.
- scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`):
Scores (confidence) of the keypoints.
"""
batch_size, _, height, width = heatmaps.shape
coords, scores = get_keypoint_predictions(heatmaps)
preds = post_dark_unbiased_data_processing(coords, heatmaps, kernel=kernel)
# Transform back to the image
for i in range(batch_size):
preds[i] = transform_preds(preds[i], center=center[i], scale=scale[i], output_size=[height, width])
return preds, scores
def post_process_pose_estimation(
self,
outputs: "VitPoseEstimatorOutput",
boxes: Union[list[list[list[float]]], np.ndarray],
kernel_size: int = 11,
threshold: Optional[float] = None,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
"""
Transform the heatmaps into keypoint predictions and transform them back to the image.
Args:
outputs (`VitPoseEstimatorOutput`):
VitPoseForPoseEstimation model outputs.
boxes (`list[list[list[float]]]` or `np.ndarray`):
List or array of bounding boxes for each image. Each box should be a list of 4 floats representing the bounding
box coordinates in COCO format (top_left_x, top_left_y, width, height).
kernel_size (`int`, *optional*, defaults to 11):
Gaussian kernel size (K) for modulation.
threshold (`float`, *optional*, defaults to None):
Score threshold to keep keypoint predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, the bounding boxes are not rescaled.
Returns:
`list[list[Dict]]`: A list of dictionaries, each dictionary containing the keypoints and boxes for an image
in the batch as predicted by the model.
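Example (an end-to-end sketch reusing the checkpoint and boxes from the `VitPoseForPoseEstimation` docstring;
these values are illustrative):
```python
>>> import torch
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, VitPoseForPoseEstimation
>>> processor = AutoImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
>>> model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]]
>>> inputs = processor(image, boxes=boxes, return_tensors="pt")
>>> with torch.no_grad():
...     outputs = model(**inputs)
>>> results = processor.post_process_pose_estimation(outputs, boxes=boxes)
>>> len(results[0])  # one entry per bounding box in the first image
2
```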
"""
# First compute centers and scales for each bounding box
batch_size, num_keypoints, _, _ = outputs.heatmaps.shape
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
centers = np.zeros((batch_size, 2), dtype=np.float32)
scales = np.zeros((batch_size, 2), dtype=np.float32)
flattened_boxes = list(itertools.chain(*boxes))
for i in range(batch_size):
if target_sizes is not None:
image_width, image_height = target_sizes[i][0], target_sizes[i][1]
scale_factor = np.array([image_width, image_height, image_width, image_height])
flattened_boxes[i] = flattened_boxes[i] * scale_factor
width, height = self.size["width"], self.size["height"]
center, scale = box_to_center_and_scale(flattened_boxes[i], image_width=width, image_height=height)
centers[i, :] = center
scales[i, :] = scale
preds, scores = self.keypoints_from_heatmaps(
outputs.heatmaps.cpu().numpy(), centers, scales, kernel=kernel_size
)
all_boxes = np.zeros((batch_size, 4), dtype=np.float32)
all_boxes[:, 0:2] = centers[:, 0:2]
all_boxes[:, 2:4] = scales[:, 0:2]
poses = torch.tensor(preds)
scores = torch.tensor(scores)
labels = torch.arange(0, num_keypoints)
bboxes_xyxy = torch.tensor(coco_to_pascal_voc(all_boxes))
results: list[list[dict[str, torch.Tensor]]] = []
pose_bbox_pairs = zip(poses, scores, bboxes_xyxy)
for image_bboxes in boxes:
image_results: list[dict[str, torch.Tensor]] = []
for _ in image_bboxes:
# Unpack the next pose and bbox_xyxy from the iterator
pose, score, bbox_xyxy = next(pose_bbox_pairs)
score = score.squeeze()
keypoints_labels = labels
if threshold is not None:
keep = score > threshold
pose = pose[keep]
score = score[keep]
keypoints_labels = keypoints_labels[keep]
pose_result = {"keypoints": pose, "scores": score, "labels": keypoints_labels, "bbox": bbox_xyxy}
image_results.append(pose_result)
results.append(image_results)
return results
__all__ = ["VitPoseImageProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitpose/__init__.py | src/transformers/models/vitpose/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_vitpose import *
from .image_processing_vitpose import *
from .image_processing_vitpose_fast import *
from .modeling_vitpose import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vitpose/modeling_vitpose.py | src/transformers/models/vitpose/modeling_vitpose.py | # coding=utf-8
# Copyright 2024 University of Sydney and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch VitPose model."""
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from ... import initialization as init
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging
from ...utils.backbone_utils import load_backbone
from ...utils.generic import can_return_tuple
from .configuration_vitpose import VitPoseConfig
logger = logging.get_logger(__name__)
# General docstring
@dataclass
@auto_docstring(
custom_intro="""
Class for outputs of pose estimation models.
"""
)
class VitPoseEstimatorOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Loss is not supported at this moment. See https://github.com/ViTAE-Transformer/ViTPose/tree/main/mmpose/models/losses for further detail.
heatmaps (`torch.FloatTensor` of shape `(batch_size, num_keypoints, height, width)`):
Heatmaps as predicted by the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
(also called feature maps) of the model at the output of each stage.
"""
loss: Optional[torch.FloatTensor] = None
heatmaps: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
class VitPosePreTrainedModel(PreTrainedModel):
config: VitPoseConfig
base_model_prefix = "vit"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
init.trunc_normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
def flip_back(output_flipped, flip_pairs, target_type="gaussian-heatmap"):
"""Flip the flipped heatmaps back to the original form.
Args:
output_flipped (`torch.tensor` of shape `(batch_size, num_keypoints, height, width)`):
The output heatmaps obtained from the flipped images.
flip_pairs (`torch.Tensor` of shape `(num_keypoints, 2)`):
Pairs of keypoints which are mirrored (for example, left ear -- right ear).
target_type (`str`, *optional*, defaults to `"gaussian-heatmap"`):
Target type to use. Can be gaussian-heatmap or combined-target.
gaussian-heatmap: Classification target with gaussian distribution.
combined-target: The combination of classification target (response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Returns:
torch.Tensor: heatmaps that are flipped back to the original image.
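Example (a toy sketch with random heatmaps and a single mirrored keypoint pair, both assumptions):
```python
>>> import torch
>>> output_flipped = torch.rand(1, 2, 64, 48)  # (batch_size, num_keypoints, height, width)
>>> flip_pairs = torch.tensor([[0, 1]])        # keypoints 0 and 1 are mirrored (e.g. left/right ear)
>>> flipped_back = flip_back(output_flipped, flip_pairs)
>>> flipped_back.shape
torch.Size([1, 2, 64, 48])
```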
"""
if target_type not in ["gaussian-heatmap", "combined-target"]:
raise ValueError("target_type should be gaussian-heatmap or combined-target")
if output_flipped.ndim != 4:
raise ValueError("output_flipped should be [batch_size, num_keypoints, height, width]")
batch_size, num_keypoints, height, width = output_flipped.shape
channels = 1
if target_type == "combined-target":
channels = 3
output_flipped[:, 1::3, ...] = -output_flipped[:, 1::3, ...]
output_flipped = output_flipped.reshape(batch_size, -1, channels, height, width)
output_flipped_back = output_flipped.clone()
# Swap left-right parts
for left, right in flip_pairs.tolist():
output_flipped_back[:, left, ...] = output_flipped[:, right, ...]
output_flipped_back[:, right, ...] = output_flipped[:, left, ...]
output_flipped_back = output_flipped_back.reshape((batch_size, num_keypoints, height, width))
# Flip horizontally
output_flipped_back = output_flipped_back.flip(-1)
return output_flipped_back
class VitPoseSimpleDecoder(nn.Module):
"""
Simple decoding head consisting of a ReLU activation, 4x upsampling and a 3x3 convolution, turning the
feature maps into heatmaps.
"""
def __init__(self, config: VitPoseConfig):
super().__init__()
self.activation = nn.ReLU()
self.upsampling = nn.Upsample(scale_factor=config.scale_factor, mode="bilinear", align_corners=False)
self.conv = nn.Conv2d(
config.backbone_config.hidden_size, config.num_labels, kernel_size=3, stride=1, padding=1
)
def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None) -> torch.Tensor:
# Transform input: ReLU + upsample
hidden_state = self.activation(hidden_state)
hidden_state = self.upsampling(hidden_state)
heatmaps = self.conv(hidden_state)
if flip_pairs is not None:
heatmaps = flip_back(heatmaps, flip_pairs)
return heatmaps
class VitPoseClassicDecoder(nn.Module):
"""
Classic decoding head consisting of 2 deconvolutional blocks, followed by a 1x1 convolution layer,
turning the feature maps into heatmaps.
"""
def __init__(self, config: VitPoseConfig):
super().__init__()
self.deconv1 = nn.ConvTranspose2d(
config.backbone_config.hidden_size, 256, kernel_size=4, stride=2, padding=1, bias=False
)
self.batchnorm1 = nn.BatchNorm2d(256)
self.relu1 = nn.ReLU()
self.deconv2 = nn.ConvTranspose2d(256, 256, kernel_size=4, stride=2, padding=1, bias=False)
self.batchnorm2 = nn.BatchNorm2d(256)
self.relu2 = nn.ReLU()
self.conv = nn.Conv2d(256, config.num_labels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_state: torch.Tensor, flip_pairs: Optional[torch.Tensor] = None):
hidden_state = self.deconv1(hidden_state)
hidden_state = self.batchnorm1(hidden_state)
hidden_state = self.relu1(hidden_state)
hidden_state = self.deconv2(hidden_state)
hidden_state = self.batchnorm2(hidden_state)
hidden_state = self.relu2(hidden_state)
heatmaps = self.conv(hidden_state)
if flip_pairs is not None:
heatmaps = flip_back(heatmaps, flip_pairs)
return heatmaps
@auto_docstring(
custom_intro="""
The VitPose model with a pose estimation head on top.
"""
)
class VitPoseForPoseEstimation(VitPosePreTrainedModel):
def __init__(self, config: VitPoseConfig):
super().__init__(config)
self.backbone = load_backbone(config)
# add backbone attributes
if not hasattr(self.backbone.config, "hidden_size"):
raise ValueError("The backbone should have a hidden_size attribute")
if not hasattr(self.backbone.config, "image_size"):
raise ValueError("The backbone should have an image_size attribute")
if not hasattr(self.backbone.config, "patch_size"):
raise ValueError("The backbone should have a patch_size attribute")
self.head = VitPoseSimpleDecoder(config) if config.use_simple_decoder else VitPoseClassicDecoder(config)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
dataset_index: Optional[torch.Tensor] = None,
flip_pairs: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> VitPoseEstimatorOutput:
r"""
dataset_index (`torch.Tensor` of shape `(batch_size,)`):
Index to use in the Mixture-of-Experts (MoE) blocks of the backbone.
This corresponds to the dataset index used during training: with a single dataset, index 0 refers to that dataset; with multiple datasets, index 0 refers to dataset A (e.g. MPII) and index 1 refers to dataset B (e.g. CrowdPose).
flip_pairs (`torch.Tensor`, *optional*):
Pairs of keypoint indices that are mirrored (for example, left ear -- right ear), used to flip the predicted heatmaps back when horizontal flip augmentation is applied.
Examples:
```python
>>> from transformers import AutoImageProcessor, VitPoseForPoseEstimation
>>> import torch
>>> from PIL import Image
>>> import requests
>>> processor = AutoImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
>>> model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]]
>>> inputs = processor(image, boxes=boxes, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> heatmaps = outputs.heatmaps
```"""
loss = None
if labels is not None:
raise NotImplementedError("Training is not yet supported")
outputs: BackboneOutput = self.backbone.forward_with_filtered_kwargs(
pixel_values,
dataset_index=dataset_index,
**kwargs,
)
# Turn output hidden states in tensor of shape (batch_size, num_channels, height, width)
sequence_output = outputs.feature_maps[-1]
batch_size = sequence_output.shape[0]
patch_height = self.config.backbone_config.image_size[0] // self.config.backbone_config.patch_size[0]
patch_width = self.config.backbone_config.image_size[1] // self.config.backbone_config.patch_size[1]
sequence_output = sequence_output.permute(0, 2, 1)
sequence_output = sequence_output.reshape(batch_size, -1, patch_height, patch_width).contiguous()
heatmaps = self.head(sequence_output, flip_pairs=flip_pairs)
return VitPoseEstimatorOutput(
loss=loss,
heatmaps=heatmaps,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["VitPosePreTrainedModel", "VitPoseForPoseEstimation"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vjepa2/configuration_vjepa2.py | src/transformers/models/vjepa2/configuration_vjepa2.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VJEPA 2 model configuration"""
from ...configuration_utils import PreTrainedConfig
class VJEPA2Config(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`VJEPA2Model`]. It is used to instantiate an
VJEPA2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the VJEPA2
[facebook/vjepa2-vitl-fpc64-256](https://huggingface.co/facebook/vjepa2-vitl-fpc64-256) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
crop_size (`int`, *optional*, defaults to 256):
Input resolution of the model
frames_per_clip (`int`, *optional*, defaults to 64):
The number of frames the model has been pretrained with. Does not impact inference.
tubelet_size (`int`, *optional*, defaults to 2):
The number of temporal frames used for a single raster (tubelet); check the paper for more information.
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers
in_chans (`int`, *optional*, defaults to 3):
The number of input channels
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Encoder
num_hidden_layers (`int`, *optional*, defaults to 24):
The number of hidden layers
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate per sample (when applied in the main path of residual layers).
mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of the hidden size of the MLPs used in Encoder relative to the `hidden_size`.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for the attention probabilities.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for attentions.
num_pooler_layers (`int`, *optional*, defaults to 3):
The number of self-attention layers in the pooler.
pred_hidden_size (`int`, *optional*, defaults to 384):
Dimensionality of the predictor layers
pred_num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Predictor
pred_num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Predictor
pred_num_mask_tokens (`int`, *optional*, defaults to 10):
Define the number of mask tokens to use in the Predictor
pred_zero_init_mask_tokens (`bool`, *optional*, defaults to `True`):
Whether to initialize the mask tokens in the predictor with zeros.
pred_mlp_ratio (`float`, *optional*, defaults to 4.0):
Ratio of the hidden size of the MLPs used in Predictor relative to the `pred_hidden_size`.
Example:
```python
>>> from transformers import VJEPA2Config, VJEPA2Model
>>> # Initializing a VJEPA2 vjepa2-vitl-fpc64-256 style configuration
>>> configuration = VJEPA2Config()
>>> # Initializing a model (with random weights) from the vjepa2-vitl-fpc64-256 style configuration
>>> model = VJEPA2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vjepa2"
def __init__(
self,
patch_size=16,
crop_size=256,
frames_per_clip=64,
tubelet_size=2,
hidden_size=1024,
in_chans=3,
num_attention_heads=16,
num_hidden_layers=24,
drop_path_rate=0.0,
mlp_ratio=4.0,
layer_norm_eps=1e-6,
qkv_bias=True,
attention_probs_dropout_prob=0.0,
hidden_act="gelu",
initializer_range=0.02,
attention_dropout=0.0,
num_pooler_layers=3,
# predictor params
pred_hidden_size=384,
pred_num_attention_heads=12,
pred_num_hidden_layers=12,
pred_num_mask_tokens=10,
pred_zero_init_mask_tokens=True,
pred_mlp_ratio=4.0,
**kwargs,
):
super().__init__(**kwargs)
self.crop_size = crop_size
self.frames_per_clip = frames_per_clip
self.patch_size = patch_size
self.tubelet_size = tubelet_size
self.hidden_size = hidden_size
self.in_chans = in_chans
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.drop_path_rate = drop_path_rate
self.mlp_ratio = mlp_ratio
self.layer_norm_eps = layer_norm_eps
self.qkv_bias = qkv_bias
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.image_size = crop_size
self.attention_dropout = attention_dropout
self.num_pooler_layers = num_pooler_layers
# predictor params
self.pred_hidden_size = pred_hidden_size
self.pred_num_attention_heads = pred_num_attention_heads
self.pred_num_hidden_layers = pred_num_hidden_layers
self.pred_num_mask_tokens = pred_num_mask_tokens
self.pred_zero_init_mask_tokens = pred_zero_init_mask_tokens
self.pred_mlp_ratio = pred_mlp_ratio
__all__ = ["VJEPA2Config"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vjepa2/convert_vjepa2_classifier_to_hf.py | src/transformers/models/vjepa2/convert_vjepa2_classifier_to_hf.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import numpy as np
import torch
from decord import VideoReader
from huggingface_hub import HfApi, hf_hub_download
from transformers import VJEPA2ForVideoClassification, VJEPA2VideoProcessor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_video():
path = hf_hub_download(
repo_id="nateraw/kinetics-mini",
filename="val/bowling/-WH-lxmGJVY_000005_000015.mp4",
repo_type="dataset",
)
video_reader = VideoReader(path)
return video_reader
CLASSIFIERS = {
# Something-Something-v2 dataset
"vjepa2-vitl-fpc16-256-ssv2": {
"base_model": "facebook/vjepa2-vitl-fpc64-256",
"checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/ssv2-vitl-16x2x3.pt",
"num_labels": 174,
"frames_per_clip": 16,
"dataset": "something-something-v2",
"result": (145, 0.30867, "Stuffing [something] into [something]"),
},
"vjepa2-vitg-fpc64-384-ssv2": {
"base_model": "facebook/vjepa2-vitg-fpc64-384",
"checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/ssv2-vitg-384-64x2x3.pt",
"frames_per_clip": 64,
"num_labels": 174,
"dataset": "something-something-v2",
"result": (112, 0.26408, "Putting [something] onto [something]"),
},
# Diving48 dataset
"vjepa2-vitl-fpc32-256-diving48": {
"base_model": "facebook/vjepa2-vitl-fpc64-256",
"checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/diving48-vitl-256.pt",
"num_labels": 48,
"frames_per_clip": 32,
"dataset": "diving48",
"result": (35, 0.32875, "['Inward', '35som', 'NoTwis', 'TUCK']"),
},
"vjepa2-vitg-fpc32-384-diving48": {
"base_model": "facebook/vjepa2-vitg-fpc64-384",
"checkpoint": "https://dl.fbaipublicfiles.com/vjepa2/evals/diving48-vitg-384-32x4x3.pt",
"frames_per_clip": 32,
"num_labels": 48,
"dataset": "diving48",
"result": (22, 0.35351, "['Forward', '25som', '2Twis', 'PIKE']"),
},
}
# fmt: off
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
r"module.pooler.query_tokens": r"pooler.query_tokens",
r"module.pooler.cross_attention_block.norm(\d+).": r"pooler.cross_attention_layer.layer_norm\1.",
r"module.pooler.cross_attention_block.xattn.(q|k|v).": r"pooler.cross_attention_layer.cross_attn.\1_proj.",
r"module.pooler.cross_attention_block.mlp.fc(\d+).": r"pooler.cross_attention_layer.mlp.fc\1.",
r"module.pooler.blocks.(\d+).norm(\d+).": r"pooler.self_attention_layers.\1.layer_norm\2.",
r"module.pooler.blocks.(\d+).attn.(q|k|v).": r"pooler.self_attention_layers.\1.self_attn.\2_proj.",
r"module.pooler.blocks.(\d+).attn.proj.": r"pooler.self_attention_layers.\1.self_attn.out_proj.",
r"module.pooler.blocks.(\d+).mlp.fc(\d+).": r"pooler.self_attention_layers.\1.mlp.fc\2.",
r"module.linear.": r"classifier.",
}
# fmt: on
def get_id2label_mapping(dataset_name: str) -> dict[int, str]:
path = hf_hub_download(
repo_id="huggingface/label-files",
filename=f"{dataset_name}-id2label.json",
repo_type="dataset",
)
with open(path, "r") as f:
id2label = json.load(f)
id2label = {int(k): v for k, v in id2label.items()}
return id2label
def split_qkv(state_dict):
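"""
Split fused attention projection tensors: `.qkv.` entries are chunked into separate `.q.`, `.k.` and `.v.`
tensors, and `.kv.` entries into `.k.` and `.v.`, so they can then be renamed to per-projection keys.
"""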
state_dict = state_dict.copy()
keys = list(state_dict.keys())
for key in keys:
if ".qkv." in key:
tensor = state_dict.pop(key)
q, k, v = torch.chunk(tensor, 3, dim=0)
state_dict[key.replace(".qkv.", ".q.")] = q
state_dict[key.replace(".qkv.", ".k.")] = k
state_dict[key.replace(".qkv.", ".v.")] = v
elif ".kv." in key:
tensor = state_dict.pop(key)
k, v = torch.chunk(tensor, 2, dim=0)
state_dict[key.replace(".kv.", ".k.")] = k
state_dict[key.replace(".kv.", ".v.")] = v
return state_dict
def convert_old_keys_to_new_keys(state_dict):
"""
This function should be applied only once, on the concatenated keys to efficiently rename using
the key mappings.
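Example (illustrative; a single original key is shown as an assumption):
>>> convert_old_keys_to_new_keys(["module.linear.weight"])
{'module.linear.weight': 'classifier.weight'}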
"""
output_dict = {}
old_text = "\n".join(state_dict)
new_text = old_text
for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
if replacement is None:
new_text = re.sub(pattern, "", new_text) # an empty line
continue
new_text = re.sub(pattern, replacement, new_text)
output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
return output_dict
def main(args: argparse.Namespace):
model_params = CLASSIFIERS[args.model_name]
id2label = get_id2label_mapping(model_params["dataset"])
if not len(id2label) == model_params["num_labels"]:
raise ValueError(
f"Number of labels in id2label mapping ({len(id2label)}) does not "
f"match number of labels in model ({model_params['num_labels']})"
)
model = VJEPA2ForVideoClassification.from_pretrained(
model_params["base_model"],
num_labels=model_params["num_labels"],
id2label=id2label,
frames_per_clip=model_params["frames_per_clip"],
)
processor = VJEPA2VideoProcessor.from_pretrained(model_params["base_model"])
# load and convert classifier checkpoint
checkpoint = torch.hub.load_state_dict_from_url(model_params["checkpoint"])
state_dict = checkpoint["classifiers"][0]
state_dict_qkv_split = split_qkv(state_dict)
key_mapping = convert_old_keys_to_new_keys(state_dict_qkv_split.keys())
converted_state_dict2 = {key_mapping[k]: v for k, v in state_dict_qkv_split.items()}
result = model.load_state_dict(converted_state_dict2, strict=False)
if result.unexpected_keys:
raise ValueError(f"Error loading state dict: {result.unexpected_keys}")
if not args.skip_verification:
# get inputs
video_reader = get_video()
frame_indexes = np.arange(0, 128, 128 / model_params["frames_per_clip"])
video = video_reader.get_batch(frame_indexes).asnumpy()
inputs = processor(video, return_tensors="pt").to(device)
# run model
model.to(device).eval()
with torch.no_grad():
outputs = model(**inputs)
# compare results
probs = torch.softmax(outputs.logits, dim=-1)
top_prob, top_idx = probs.topk(1)
top_prob, top_idx = top_prob.item(), top_idx.item()
label = id2label[top_idx]
expected_id, expected_prob, expected_label = model_params["result"]
if not top_idx == expected_id:
raise ValueError(f"Expected id {expected_id} but got {top_idx}")
if not label == expected_label:
raise ValueError(f"Expected label {expected_label} but got {label}")
if not np.isclose(top_prob, expected_prob, atol=1e-3):
raise ValueError(f"Expected prob {expected_prob} but got {top_prob}")
print("Verification passed")
output_dir = os.path.join(args.base_dir, args.model_name)
model.save_pretrained(output_dir)
processor.save_pretrained(output_dir)
if args.push_to_hub:
api = HfApi()
repo_id = f"{args.repo_org}/{args.model_name}"
if not api.repo_exists(repo_id):
api.create_repo(repo_id, repo_type="model")
api.upload_folder(folder_path=output_dir, repo_id=repo_id, repo_type="model")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, required=True)
parser.add_argument("--base_dir", type=str, default="converted_models/")
parser.add_argument("--repo_org", type=str, default="qubvel-hf")
parser.add_argument("--push_to_hub", action="store_true")
parser.add_argument("--skip_verification", action="store_true")
args = parser.parse_args()
main(args)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vjepa2/video_processing_vjepa2.py | src/transformers/models/vjepa2/video_processing_vjepa2.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Video processor class for VJEPA2."""
from ...image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
from ...processing_utils import Unpack, VideosKwargs
from ...video_processing_utils import BaseVideoProcessor
class VJEPA2VideoProcessor(BaseVideoProcessor):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": int(256 * 256 / 224)}
crop_size = 256
do_resize = True
do_rescale = True
do_center_crop = True
do_normalize = True
def __init__(self, **kwargs: Unpack[VideosKwargs]):
crop_size = kwargs.get("crop_size", 256)
if not isinstance(crop_size, int):
if not isinstance(crop_size, dict) or "height" not in crop_size:
raise ValueError("crop_size must be an integer or a dictionary with a 'height' key")
crop_size = crop_size["height"]
resize_size = int(crop_size * 256 / 224)
kwargs["size"] = {"shortest_edge": resize_size}
super().__init__(**kwargs)
__all__ = ["VJEPA2VideoProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vjepa2/convert_vjepa2_to_hf.py | src/transformers/models/vjepa2/convert_vjepa2_to_hf.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import tempfile
from pathlib import Path
import numpy as np
import requests
import torch
from huggingface_hub import HfApi
from PIL import Image
from transformers import VJEPA2Config, VJEPA2Model, VJEPA2VideoProcessor
from transformers.models.vjepa2.modeling_vjepa2 import apply_masks
HUB_REPO = "https://github.com/facebookresearch/vjepa2"
HUB_SOURCE = "github"
HUB_MODELS = {
"vit_large": "facebook/vjepa2-vitl-fpc64-256",
"vit_huge": "facebook/vjepa2-vith-fpc64-256",
"vit_giant": "facebook/vjepa2-vitg-fpc64-256",
"vit_giant_384": "facebook/vjepa2-vitg-fpc64-384",
}
S3_MODELS = {
"vit_large": "https://dl.fbaipublicfiles.com/vjepa2/vitl.pt",
"vit_huge": "https://dl.fbaipublicfiles.com/vjepa2/vith.pt",
"vit_giant": "https://dl.fbaipublicfiles.com/vjepa2/vitg.pt",
"vit_giant_384": "https://dl.fbaipublicfiles.com/vjepa2/vitg-384.pt",
}
TOKEN = os.environ.get("HF_TOKEN", None)
def get_vjepa2_config(model_name):
# size of the architecture
if model_name == "vit_large":
return VJEPA2Config(
crop_size=256,
frames_per_clip=64,
hidden_size=1024,
num_attention_heads=16,
num_hidden_layers=24,
mlp_ratio=4,
pred_hidden_size=384,
pred_num_attention_heads=12,
pred_num_hidden_layers=12,
pred_num_mask_tokens=10,
)
elif model_name == "vit_huge":
return VJEPA2Config(
crop_size=256,
frames_per_clip=64,
hidden_size=1280,
num_attention_heads=16,
num_hidden_layers=32,
mlp_ratio=4,
pred_hidden_size=384,
pred_num_attention_heads=12,
pred_num_hidden_layers=12,
pred_num_mask_tokens=10,
)
elif model_name == "vit_giant":
return VJEPA2Config(
crop_size=256,
frames_per_clip=64,
hidden_size=1408,
num_attention_heads=22,
num_hidden_layers=40,
mlp_ratio=48 / 11,
pred_hidden_size=384,
pred_num_attention_heads=12,
pred_num_hidden_layers=12,
pred_num_mask_tokens=10,
)
elif model_name == "vit_giant_384":
return VJEPA2Config(
crop_size=384,
frames_per_clip=64,
hidden_size=1408,
num_attention_heads=22,
num_hidden_layers=40,
mlp_ratio=48 / 11,
pred_hidden_size=384,
pred_num_attention_heads=12,
pred_num_hidden_layers=12,
pred_num_mask_tokens=10,
)
else:
raise ValueError("Model not supported")
def convert_encoder_keys(model_state_dict, og_encoder_state_dict, config):
emb_dim = config.hidden_size
for key, val in og_encoder_state_dict.copy().items():
val = og_encoder_state_dict.pop(key)
key = key.replace("module.backbone.", "")
if key.startswith("blocks."):
key = key.replace("blocks.", "encoder.layer.")
if "attn." in key:
key = key.replace("attn.", "attention.")
if key == "pos_embed":
key = "encoder.embeddings.position_embeddings"
if "patch_embed." in key:
key = key.replace("patch_embed.", "encoder.embeddings.patch_embeddings.")
if key.startswith("norm."):
key = key.replace("norm.", "encoder.layernorm.")
if "qkv." in key:
prefix, suffix = key.split("qkv")
if "bias" in suffix:
q_e, k_e, v_e = (
val[0:emb_dim],
val[emb_dim : emb_dim * 2],
val[emb_dim * 2 :],
)
else:
q_e, k_e, v_e = (
val[0:emb_dim, :],
val[emb_dim : emb_dim * 2, :],
val[emb_dim * 2 :, :],
)
og_encoder_state_dict[prefix + "query" + suffix] = q_e
og_encoder_state_dict[prefix + "key" + suffix] = k_e
og_encoder_state_dict[prefix + "value" + suffix] = v_e
else:
og_encoder_state_dict[key] = val
return og_encoder_state_dict
def convert_predictor_keys(model_state_dict, og_predictor_state_dict, config):
emb_dim = config.pred_hidden_size
if "predictor_pos_embed" in og_predictor_state_dict:
del og_predictor_state_dict["predictor_pos_embed"]
# update predictor weights
mask_tokens = {}
mask_token_keys_to_delete = []
for key, val in og_predictor_state_dict.copy().items():
val = og_predictor_state_dict.pop(key)
key = key.replace("module.backbone.", "")
if key.startswith("predictor_blocks."):
key = key.replace("predictor_blocks.", "predictor.layer.")
if "attn." in key:
key = key.replace("attn.", "attention.")
if key == "predictor_pos_embed":
key = "predictor.embeddings.position_embeddings"
if "predictor_embed." in key:
key = key.replace("predictor_embed.", "predictor.embeddings.predictor_embeddings.")
if "mask_tokens." in key:
mask_tokens[key.split("mask_tokens.")[-1]] = val
mask_token_keys_to_delete.append(key)
# key = key.replace("mask_tokens.", "predictor.embeddings.mask_tokens.")
if key.startswith("predictor_norm."):
key = key.replace("predictor_norm.", "predictor.layernorm.")
if key.startswith("predictor_proj."):
key = key.replace("predictor_proj.", "predictor.proj.")
if "qkv." in key:
prefix, suffix = key.split("qkv")
if "bias" in suffix:
q_e, k_e, v_e = (
val[0:emb_dim],
val[emb_dim : emb_dim * 2],
val[emb_dim * 2 :],
)
else:
q_e, k_e, v_e = (
val[0:emb_dim, :],
val[emb_dim : emb_dim * 2, :],
val[emb_dim * 2 :, :],
)
og_predictor_state_dict[prefix + "query" + suffix] = q_e
og_predictor_state_dict[prefix + "key" + suffix] = k_e
og_predictor_state_dict[prefix + "value" + suffix] = v_e
else:
og_predictor_state_dict[key] = val
mask_tokens = torch.stack([mask_tokens[f"{i}"] for i in range(len(mask_tokens))], dim=0)
for k in mask_token_keys_to_delete:
del og_predictor_state_dict[k]
og_predictor_state_dict["predictor.embeddings.mask_tokens"] = mask_tokens
return og_predictor_state_dict
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
return image
def upload_original_ckpts(model_name):
hf_repo = HUB_MODELS[model_name]
original_ckpt = S3_MODELS[model_name]
print(f"Uploading original checkpoint for vjepa2 {model_name} to {hf_repo}/original/")
with tempfile.NamedTemporaryFile() as fn:
local_path = fn.name
torch.hub.download_url_to_file(original_ckpt, local_path)
api = HfApi()
api.upload_file(
repo_id=hf_repo,
path_or_fileobj=local_path,
path_in_repo="original/model.pth",
repo_type="model",
token=TOKEN,
)
print("Uploading complete")
@torch.no_grad()
def convert_and_test_vjepa2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
"""
Copy/paste/tweak model's weights to our VJEPA2 structure.
"""
config = get_vjepa2_config(model_name)
# load original model from torch hub
original_encoder, original_predictor = torch.hub.load(HUB_REPO, "vjepa2_" + model_name, source=HUB_SOURCE)
original_encoder.eval()
original_predictor.eval()
original_preprocessor = torch.hub.load(
HUB_REPO, "vjepa2_preprocessor", source=HUB_SOURCE, crop_size=config.crop_size
)
# load state_dict of original model, remove and rename some keys
encoder_state_dict = original_encoder.state_dict()
decoder_state_dict = original_predictor.state_dict()
model = VJEPA2Model(config).eval()
state_dict = model.state_dict()
og_encoder_sd = convert_encoder_keys(state_dict, encoder_state_dict, config)
og_predictor_sd = convert_predictor_keys(state_dict, decoder_state_dict, config)
og_state_dict = og_encoder_sd
og_state_dict.update(og_predictor_sd)
model.load_state_dict(og_state_dict)
# load image
image = prepare_img()
image = torch.Tensor(np.array(image)).unsqueeze(0).permute(0, 3, 1, 2)
print("Input shape: ", image.shape)
crop_size = config.crop_size
processor = VJEPA2VideoProcessor(crop_size=crop_size)
pr_out = processor(image, return_tensors="pt")
pixel_values_videos = pr_out.pixel_values_videos
# run original preprocessor
original_pixel_values = original_preprocessor(image)
assert original_pixel_values[0].permute(1, 0, 2, 3).shape == pixel_values_videos[0].shape
assert torch.allclose(original_pixel_values[0].permute(1, 0, 2, 3), pixel_values_videos[0], atol=1e-3)
with torch.no_grad():
# reshape and move to gpu
if pixel_values_videos.size(1) == 1:
pixel_values_videos = pixel_values_videos.repeat(1, config.frames_per_clip, 1, 1, 1)
# pixel_values_videos = pixel_values_videos.permute(0, 2, 1, 3, 4) # B x C x T x H x W
pixel_values_videos = pixel_values_videos.to(device="cuda", dtype=torch.float32)
original_encoder = original_encoder.to(device="cuda", dtype=torch.float32)
original_predictor = original_predictor.to(device="cuda", dtype=torch.float32)
model = model.to(device="cuda", dtype=torch.float32)
# forward
original_encoder_outputs = original_encoder(pixel_values_videos.permute(0, 2, 1, 3, 4))
B, N, _ = original_encoder_outputs.shape
# test full mask
context_mask = [torch.arange(N, device=pixel_values_videos.device).unsqueeze(0).repeat((B, 1))]
predictor_mask = context_mask
original_predictor_outputs = original_predictor(original_encoder_outputs, context_mask, predictor_mask)
outputs = model(pixel_values_videos, context_mask=context_mask, target_mask=predictor_mask)
assert torch.allclose(outputs.last_hidden_state, original_encoder_outputs, atol=1e-3)
predictor_outputs = outputs.predictor_output
assert torch.allclose(predictor_outputs.last_hidden_state, original_predictor_outputs, atol=1e-3)
# test partial mask
window_size = 256
mask = torch.arange(N, device=pixel_values_videos.device).unsqueeze(0)
context_mask = [mask[:, :window_size].repeat((B, 1))]
predictor_mask = [mask[:, window_size : window_size * 2].repeat((B, 1))]
original_predictor_outputs = original_predictor(
apply_masks(original_encoder_outputs, context_mask),
context_mask,
predictor_mask,
)
outputs = model(pixel_values_videos, context_mask=context_mask, target_mask=predictor_mask)
assert torch.allclose(outputs.last_hidden_state, original_encoder_outputs, atol=1e-3)
predictor_outputs = outputs.predictor_output
assert torch.allclose(predictor_outputs.last_hidden_state, original_predictor_outputs, atol=1e-3)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
name = HUB_MODELS[model_name]
model.push_to_hub(name, private=True)
processor.push_to_hub(name, private=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="vit_large",
type=str,
choices=[
"vit_large",
"vit_huge",
"vit_giant",
"vit_giant_384",
],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model to the Hugging Face hub.",
)
parser.add_argument("--upload_original", action="store_true", help="upload the original checkpoint")
args = parser.parse_args()
convert_and_test_vjepa2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
if args.upload_original:
upload_original_ckpts(args.model_name)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vjepa2/__init__.py | src/transformers/models/vjepa2/__init__.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_vjepa2 import *
from .modeling_vjepa2 import *
from .video_processing_vjepa2 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/vjepa2/modeling_vjepa2.py | src/transformers/models/vjepa2/modeling_vjepa2.py | # coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
from .configuration_vjepa2 import VJEPA2Config
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
    VJEPA Predictor outputs that also contain the masked encoder outputs
"""
)
class VJEPA2WithMaskedInputPredictorOutput(ModelOutput):
r"""
    masked_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, returned when a `context_mask` is provided and applied to the VJEPA2Encoder outputs):
        The masked hidden state of the model.
    target_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, returned when a `target_mask` is provided and applied to the VJEPA2Encoder outputs):
        The target hidden state of the model.
"""
last_hidden_state: torch.FloatTensor
masked_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
target_hidden_state: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
    VJEPA outputs that also contain the masked encoder outputs.
    May also contain the predictor outputs.
"""
)
class VJEPA2WithMaskedInputModelOutput(ModelOutput):
r"""
    masked_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*, returned when a `context_mask` is provided and applied to the VJEPA2Encoder outputs):
The masked hidden state of the model.
predictor_output (`VJEPA2WithMaskedInputPredictorOutput`, *optional*):
The output from the Predictor module.
"""
last_hidden_state: torch.FloatTensor
masked_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
predictor_output: Optional[VJEPA2WithMaskedInputPredictorOutput] = None
def to_tuple(self):
output = list(super().to_tuple())
if isinstance(output[-1], VJEPA2WithMaskedInputPredictorOutput):
output[-1] = output[-1].to_tuple()
return tuple(output)
class VJEPA2PatchEmbeddings3D(nn.Module):
"""
Image to Patch Embedding
"""
def __init__(
self,
config: VJEPA2Config,
hidden_size: int = 1024,
):
super().__init__()
self.patch_size = config.patch_size
self.tubelet_size = config.tubelet_size
self.hidden_size = hidden_size
self.proj = nn.Conv3d(
in_channels=config.in_chans,
out_channels=hidden_size,
kernel_size=(config.tubelet_size, config.patch_size, config.patch_size),
stride=(config.tubelet_size, config.patch_size, config.patch_size),
)
@staticmethod
def num_patches(config):
return (
(config.frames_per_clip // config.tubelet_size)
* (config.crop_size // config.patch_size)
* (config.crop_size // config.patch_size)
)
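    # Worked example (editor's note): a 16-frame clip at 224x224 with tubelet_size=2 and patch_size=16
    # yields (16 // 2) * (224 // 16) * (224 // 16) = 8 * 14 * 14 = 1568 patches.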
def forward(self, pixel_values_videos: torch.Tensor) -> torch.Tensor:
x = self.proj(pixel_values_videos).flatten(2).transpose(1, 2)
return x
class VJEPA2Embeddings(nn.Module):
"""
Construct mask token, position and patch embeddings.
"""
def __init__(self, config: VJEPA2Config, hidden_size: int = 1024):
super().__init__()
self.config = config
self.hidden_size = hidden_size
self.patch_embeddings = VJEPA2PatchEmbeddings3D(config, hidden_size=hidden_size)
self.num_patches = self.patch_embeddings.num_patches
self.patch_size = config.patch_size
def forward(self, pixel_values_videos: torch.Tensor) -> torch.Tensor:
num_frames = pixel_values_videos.shape[1]
# Swap `frames` and `channels` dims, the result is:
# (batch_size, channels, num_frames, height, width)
pixel_values_videos = pixel_values_videos.permute(0, 2, 1, 3, 4)
        # In some cases the visual input (image/video) has num_frames < tubelet_size, which would
        # make the embedding lookup fail. In these cases, we duplicate the frames.
if num_frames < self.config.tubelet_size:
pixel_values_videos = pixel_values_videos.repeat(1, 1, self.config.tubelet_size, 1, 1)
target_dtype = self.patch_embeddings.proj.weight.dtype
pixel_values_videos = pixel_values_videos.to(dtype=target_dtype)
embeddings = self.patch_embeddings(pixel_values_videos)
return embeddings
# Adapted from transformers.models.vit.modeling_vit.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
# Normalize the attention scores to probabilities.
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_queries_or_keys(x, pos):
B, num_heads, N, D = x.size()
    # similar to inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
    # the original computes this on every call; the usual HF style is to compute inv_freq once and cache it
# -- compute angle for each position
omega = torch.arange(D // 2, dtype=x.dtype, device=x.device)
omega /= D / 2.0
omega = 1.0 / 10000**omega # (D/2,)
freq = pos.unsqueeze(-1) * omega # (..., N, D/2), outer product
# -- build rotation matrix and apply
emb_sin = freq.sin() # (..., N, D/2)
emb_cos = freq.cos() # (..., N, D/2)
emb_sin = emb_sin.squeeze(-1).repeat(1, 1, 1, 2)
emb_cos = emb_cos.squeeze(-1).repeat(1, 1, 1, 2)
# --
y = x.unflatten(-1, (-1, 2))
y1, y2 = y.unbind(dim=-1)
y = torch.stack((-y2, y1), dim=-1)
y = y.flatten(-2)
return (x * emb_cos) + (y * emb_sin)
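# Minimal shape sketch for `rotate_queries_or_keys` (editor's addition, not part of the original module;
# the helper below is illustrative only and is never called by the model).
def _example_rotate_queries_or_keys():
    # 2 clips, 16 heads, 8 tokens, head_dim 64 (the head_dim must be even for the pairwise rotation)
    queries = torch.randn(2, 16, 8, 64)
    # one position id per token, broadcast over batch and heads
    positions = torch.arange(8).view(1, 1, 8).expand(2, 16, 8)
    rotated = rotate_queries_or_keys(queries, positions)
    return rotated.shape  # torch.Size([2, 16, 8, 64]) -- the rotation preserves the shape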
class VJEPA2RopeAttention(nn.Module):
def __init__(
self,
config: VJEPA2Config,
hidden_size: int = 1024,
num_attention_heads: int = 16,
):
super().__init__()
self.config = config
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
if hidden_size % num_attention_heads != 0:
raise ValueError(
f"The hidden size {(hidden_size,)} is not a multiple of the number of attention "
f"heads {num_attention_heads}."
)
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.proj = nn.Linear(hidden_size, hidden_size)
self.dropout_prob = config.attention_probs_dropout_prob
self.dropout = nn.Dropout(self.dropout_prob)
self.grid_size = self.config.crop_size // self.config.patch_size
self.grid_depth = self.config.frames_per_clip // self.config.tubelet_size
self.d_dim = int(2 * ((self.attention_head_size // 3) // 2))
self.h_dim = int(2 * ((self.attention_head_size // 3) // 2))
self.w_dim = int(2 * ((self.attention_head_size // 3) // 2))
self.scaling = self.attention_head_size**-0.5
self.is_causal = False
def _get_frame_pos(self, ids):
tokens_per_frame = int(self.grid_size * self.grid_size)
return ids // tokens_per_frame
def _get_height_pos(self, ids):
# Remove frame component from ids
tokens_per_frame = int(self.grid_size * self.grid_size)
frame_ids = self._get_frame_pos(ids)
ids = ids - tokens_per_frame * frame_ids
# --
tokens_per_row = self.grid_size
return ids // tokens_per_row
def get_position_ids(self, x, masks=None):
device = x.device
token_size = x.size(1)
        # Note: when `masks` is None, we use a 1D id tensor instead of a B x num_attention_heads mask,
        # as the 1D vector is broadcast to the correct shapes.
if masks is not None:
ids = masks.unsqueeze(1).repeat(1, self.num_attention_heads, 1)
else:
ids = torch.arange(token_size, device=device)
# change to allow for extrapolation
tokens_per_frame = int(self.grid_size * self.grid_size)
frame_ids = self._get_frame_pos(ids)
# --
tokens_per_row = self.grid_size
height_ids = self._get_height_pos(ids)
# --
# Remove frame component from ids (1st term) and height component (2nd term)
width_ids = (ids - tokens_per_frame * frame_ids) - tokens_per_row * height_ids
return frame_ids, height_ids, width_ids
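    # Worked example (editor's note): with grid_size=16 (256 tokens per frame), token id 530 decomposes
    # into frame 530 // 256 = 2, remainder 530 - 512 = 18, height 18 // 16 = 1 and width 18 - 16 = 2,
    # i.e. the (frame, row, column) coordinates consumed by the rotary embedding below.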
def apply_rotary_embeddings(self, qk, pos_ids):
d_mask, h_mask, w_mask = pos_ids
s = 0
qkd = rotate_queries_or_keys(qk[..., s : s + self.d_dim], pos=d_mask)
s += self.d_dim
qkh = rotate_queries_or_keys(qk[..., s : s + self.h_dim], pos=h_mask)
s += self.h_dim
qkw = rotate_queries_or_keys(qk[..., s : s + self.w_dim], pos=w_mask)
s += self.w_dim
# Combine rotated dimension
if s < self.attention_head_size:
qkr = qk[..., s:]
qk = torch.cat([qkd, qkh, qkw, qkr], dim=-1)
else:
qk = torch.cat([qkd, qkh, qkw], dim=-1)
return qk
def forward(
self,
hidden_states,
position_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
pos_ids = self.get_position_ids(hidden_states, masks=position_mask)
key_layer = self.apply_rotary_embeddings(key_layer, pos_ids)
query_layer = self.apply_rotary_embeddings(query_layer, pos_ids)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(
self,
query_layer,
key_layer,
value_layer,
None,
is_causal=self.is_causal,
scaling=self.scaling,
dropout=0.0 if not self.training else self.dropout_prob,
)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = self.proj(context_layer.reshape(new_context_layer_shape))
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
# Adapted from transformers.models.beit.modeling_dinov2.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
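# Behavioural sketch for `drop_path` (editor's addition, not part of the original module): in eval mode
# it is the identity, while in training the surviving samples are rescaled by 1 / keep_prob so the
# expected value of the output matches the input. The helper below is illustrative and never called.
def _example_drop_path():
    x = torch.ones(4, 3)
    assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)  # identity at inference
    out = drop_path(x, drop_prob=0.5, training=True)  # each row is either zeroed or scaled to 2.0
    return out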
# Adapted from transformers.models.beit.modeling_beit.BeitDropPath
class VJEPA2DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None):
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
class VJEPA2MLP(nn.Module):
def __init__(self, config: VJEPA2Config, hidden_size: int = 1024, mlp_ratio: float = 4.0):
super().__init__()
in_features = out_features = hidden_size
hidden_features = int(hidden_size * mlp_ratio)
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.activation = ACT2FN[config.hidden_act]
self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
hidden_state = self.fc1(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.fc2(hidden_state)
return hidden_state
class VJEPA2Layer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the original implementation."""
def __init__(
self,
config: VJEPA2Config,
drop_path_rate: float = 0.0,
hidden_size: int = 1024,
num_attention_heads: int = 16,
mlp_ratio: float = 4.0,
):
super().__init__()
self.config = config
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.mlp_ratio = mlp_ratio
self.norm1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.attention = VJEPA2RopeAttention(config, hidden_size, num_attention_heads)
self.drop_path = VJEPA2DropPath(drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
self.norm2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.mlp = VJEPA2MLP(config, hidden_size=hidden_size, mlp_ratio=mlp_ratio)
def forward(
self,
hidden_states: torch.Tensor,
position_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, ...]:
# Self-Attention
residual = hidden_states
hidden_states = self.norm1(hidden_states)
self_attention_outputs = self.attention(
hidden_states,
position_mask=position_mask, # position mask for context/target selection
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
hidden_states = self.drop_path(attention_output) + residual
# MLP
residual = hidden_states
hidden_states = self.norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.drop_path(hidden_states) + residual
# Add self attentions if we output attention weights
outputs = self_attention_outputs[1:]
outputs = (hidden_states,) + outputs
return outputs
class VJEPA2Encoder(nn.Module):
def __init__(self, config: VJEPA2Config):
super().__init__()
self.config = config
self.embeddings = VJEPA2Embeddings(config, hidden_size=config.hidden_size)
drop_path_rates = [
(config.drop_path_rate * i / (config.num_hidden_layers - 1) if config.num_hidden_layers > 1 else 0.0)
for i in range(config.num_hidden_layers)
]
self.layer = nn.ModuleList(
[
VJEPA2Layer(
config,
drop_path_rate=drop_path_rates[i],
hidden_size=config.hidden_size,
num_attention_heads=config.num_attention_heads,
mlp_ratio=config.mlp_ratio,
)
for i in range(config.num_hidden_layers)
]
)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
pixel_values_videos: Optional[torch.Tensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
**kwargs,
) -> BaseModelOutput:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
hidden_states = self.embeddings(pixel_values_videos)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, None, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layernorm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
def apply_masks(tensor: torch.Tensor, masks: list[torch.Tensor]) -> torch.Tensor:
"""
Args:
tensor (`torch.Tensor`):
Tensor of shape [batch_size, num_patches, feature_dim]
masks (`List[torch.Tensor]`):
List of tensors of shape [batch_size, num_patches] containing indices of patches to keep
"""
all_masked_tensors = []
for mask in masks:
mask = mask.to(tensor.device)
mask_keep = mask.unsqueeze(-1).repeat(1, 1, tensor.size(-1))
all_masked_tensors += [torch.gather(tensor, dim=1, index=mask_keep)]
return torch.cat(all_masked_tensors, dim=0)
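# Tiny usage sketch for `apply_masks` (editor's addition, not part of the original module; never called).
def _example_apply_masks():
    # 1 video, 4 patches, feature dim 2; keep patch indices 0 and 2
    features = torch.arange(8, dtype=torch.float32).reshape(1, 4, 2)
    kept = apply_masks(features, [torch.tensor([[0, 2]])])
    return kept  # tensor([[[0., 1.], [4., 5.]]]) with shape (1, 2, 2)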
class VJEPA2PredictorEmbeddings(nn.Module):
"""
Construct mask token, position and patch embeddings.
"""
def __init__(self, config: VJEPA2Config):
super().__init__()
self.config = config
self.predictor_embeddings = nn.Linear(config.hidden_size, config.pred_hidden_size)
self.num_mask_tokens = 0
self.zero_init_mask_tokens = config.pred_zero_init_mask_tokens
self.num_mask_tokens = config.pred_num_mask_tokens
self.mask_tokens = nn.Parameter(torch.zeros(self.num_mask_tokens, 1, 1, config.pred_hidden_size))
self.patch_size = config.patch_size
self.config = config
@staticmethod
def num_patches(config):
if config.frames_per_clip > 1:
return (
(config.frames_per_clip // config.tubelet_size)
* (config.crop_size // config.patch_size)
* (config.crop_size // config.patch_size)
)
else:
return (config.crop_size // config.patch_size) * (config.crop_size // config.patch_size)
def forward(
self,
hidden_states: torch.Tensor,
context_mask: list[torch.Tensor],
target_mask: list[torch.Tensor],
mask_index: int = 1,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
hidden_states : encoder outputs (context)
context_mask: tokens of the context (outputs from the encoder)
target_mask: tokens to predict
mask_index: index of the target mask to choose (useful for multiclip?)
"""
B = hidden_states.size(0)
context = self.predictor_embeddings(hidden_states)
# Make target tokens
mask_index = mask_index % self.num_mask_tokens
target = self.mask_tokens[mask_index]
# Note: this is problematic if the config isn't initialized with the right frames_per_clip value,
        # e.g. for scenarios where we want to run the predictor on more tokens than the config allows.
# target = target.repeat(B, self.num_patches(self.config), 1)
# Remedy: use the provided target mask to get the max patch num
max_patch_num = target_mask[0].max() + 1 # one extra to include the last patch
target = target.repeat(B, max_patch_num, 1)
target = apply_masks(target, target_mask)
# Concatenate context & target tokens
context = context.repeat(len(context_mask), 1, 1)
embeddings = torch.cat([context, target], dim=1)
# Positions of context & target tokens
cm = torch.cat(context_mask, dim=0)
tm = torch.cat(target_mask, dim=0)
masks = torch.cat([cm, tm], dim=1)
return embeddings, masks
class VJEPA2Predictor(nn.Module):
def __init__(self, config: VJEPA2Config):
super().__init__()
self.config = config
self.gradient_checkpointing = False
self.embeddings = VJEPA2PredictorEmbeddings(config)
drop_path_rates = [
(
config.drop_path_rate * i / (config.pred_num_hidden_layers - 1)
if config.pred_num_hidden_layers > 1
else 0.0
)
for i in range(config.pred_num_hidden_layers)
]
self.layer = nn.ModuleList(
[
VJEPA2Layer(
config,
drop_path_rate=drop_path_rates[i],
hidden_size=config.pred_hidden_size,
num_attention_heads=config.pred_num_attention_heads,
mlp_ratio=config.pred_mlp_ratio,
)
for i in range(config.pred_num_hidden_layers)
]
)
self.layernorm = nn.LayerNorm(config.pred_hidden_size, eps=config.layer_norm_eps)
self.proj = nn.Linear(config.pred_hidden_size, config.hidden_size, bias=True)
def sort_tokens(self, hidden_states, position_masks, argsort):
# gather position masks
argsort = argsort.to(position_masks.device)
position_masks = torch.gather(position_masks, dim=1, index=argsort)
# gather hidden states
argsort = argsort.to(hidden_states.device)
hidden_states_argsort = argsort.unsqueeze(-1).expand(-1, -1, hidden_states.size(-1))
hidden_states = torch.gather(hidden_states, dim=1, index=hidden_states_argsort)
return hidden_states, position_masks
def unsort_tokens(self, hidden_states, argsort):
argsort = argsort.to(hidden_states.device)
reverse_argsort = torch.argsort(argsort, dim=1)
reverse_argsort = reverse_argsort.unsqueeze(-1).expand(-1, -1, hidden_states.size(-1))
hidden_states = torch.gather(hidden_states, dim=1, index=reverse_argsort)
return hidden_states
@can_return_tuple
def forward(
self,
encoder_hidden_states: torch.Tensor,
context_mask: list[torch.Tensor],
target_mask: list[torch.Tensor],
output_attentions: bool = False,
output_hidden_states: bool = False,
**kwargs,
) -> BaseModelOutput:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
# mask out the encoder hidden states
# this is implemented here as in VJEPA training a separate encoder is used for target
encoder_hidden_states = apply_masks(encoder_hidden_states, context_mask)
_, N_ctxt, D = encoder_hidden_states.shape
hidden_states, position_masks = self.embeddings(encoder_hidden_states, context_mask, target_mask)
# Put tokens in sorted order
argsort = torch.argsort(position_masks, dim=1) # [B, N]
hidden_states, position_masks = self.sort_tokens(hidden_states, position_masks, argsort)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, position_masks, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = self.layernorm(hidden_states)
# unsort and extract the predicted tokens
hidden_states = self.unsort_tokens(hidden_states, argsort)
hidden_states = hidden_states[:, N_ctxt:]
# projection
hidden_states = self.proj(hidden_states)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class VJEPA2PoolerSelfAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: VJEPA2Config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
class VJEPA2PoolerCrossAttention(nn.Module):
"""It's different from other cross-attention layers, doesn't have output projection layer (o_proj)"""
# in case of modular refactoring - o_proj can be replaces with nn.Identity()
def __init__(self, config: VJEPA2Config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(
self,
queries: torch.Tensor,
keys: torch.Tensor,
values: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, q_seq_length, embed_dim = queries.shape
kv_seq_length = keys.shape[1]
queries = self.q_proj(queries)
keys = self.k_proj(keys)
values = self.v_proj(values)
queries = queries.view(batch_size, q_seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, kv_seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, kv_seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, q_seq_length, embed_dim).contiguous()
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
# Modified from SiglipEncoderLayer, but we have to propagate proper hidden_size to VJEPA2MLP
class VJEPA2PoolerSelfAttentionLayer(GradientCheckpointingLayer):
def __init__(self, config: VJEPA2Config):
super().__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.self_attn = VJEPA2PoolerSelfAttention(config)
self.layer_norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = VJEPA2MLP(config, hidden_size=config.hidden_size)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, ...]:
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(batch, seq_len, embed_dim)`.
attention_mask (`torch.FloatTensor`):
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/aya_vision/modular_aya_vision.py | src/transformers/models/aya_vision/modular_aya_vision.py | # coding=utf-8
# Copyright 2025 the Cohere Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch AyaVision model."""
from typing import Optional, Union
import torch
from torch import nn
from transformers.models.llava.modeling_llava import (
LlavaCausalLMOutputWithPast,
LlavaForConditionalGeneration,
LlavaModel,
LlavaModelOutputWithPast,
LlavaPreTrainedModel,
TransformersKwargs,
)
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...processing_utils import Unpack
from ...utils import auto_docstring, logging
from ...utils.generic import check_model_inputs
from .configuration_aya_vision import AyaVisionConfig
logger = logging.get_logger(__name__)
class AyaVisionMultiModalProjector(nn.Module):
def __init__(self, config: AyaVisionConfig):
super().__init__()
self.config = config
self.downsample_factor = config.downsample_factor
self.alignment_intermediate_size = getattr(
config, "alignment_intermediate_size", config.text_config.hidden_size
)
self.layernorm = nn.LayerNorm(
config.vision_config.hidden_size * (config.downsample_factor**2), eps=config.adapter_layer_norm_eps
)
self.linear_1 = nn.Linear(
config.vision_config.hidden_size * (config.downsample_factor**2),
self.alignment_intermediate_size,
bias=True,
)
self.act = ACT2FN["silu"] # SwiGLU uses SiLU activation
# For SwiGLU, project down to half size since we split intermediate dim
self.linear_2 = nn.Linear(self.alignment_intermediate_size // 2, config.text_config.hidden_size, bias=True)
def forward(self, image_features):
image_features = self.pixel_shuffle(image_features)
image_features = self.layernorm(image_features)
hidden_states = self.linear_1(image_features)
# Split along last dimension and apply SwiGLU
x, gate = hidden_states.chunk(2, dim=-1)
hidden_states = self.act(gate) * x
hidden_states = self.linear_2(hidden_states)
return hidden_states
def pixel_shuffle(self, image_features): # B, S, D
batch_size, seq_length, feature_dim = image_features.shape
height = width = int(seq_length**0.5)
image_features = image_features.reshape(image_features.shape[0], width, height, -1)
channels = image_features.shape[-1]
image_features = image_features.reshape(
batch_size, width, int(height / self.downsample_factor), int(channels * self.downsample_factor)
)
image_features = image_features.permute(0, 2, 1, 3)
image_features = image_features.reshape(
batch_size, int(height / self.downsample_factor), int(width / self.downsample_factor), -1
)
image_features = image_features.permute(0, 2, 1, 3)
return image_features
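# Shape sketch for `pixel_shuffle` (editor's addition, not part of the original module): the method only
# reads `self.downsample_factor`, so a tiny stand-in object is enough to illustrate it. Never called.
def _example_pixel_shuffle_shapes():
    class _Carrier:
        downsample_factor = 2
        pixel_shuffle = AyaVisionMultiModalProjector.pixel_shuffle
    features = torch.randn(1, 16, 8)  # a 4x4 grid of 8-dim vision features
    shuffled = _Carrier().pixel_shuffle(features)
    return shuffled.shape  # torch.Size([1, 2, 2, 32]): 4x fewer positions, 4x wider channels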
class AyaVisionPreTrainedModel(LlavaPreTrainedModel):
_can_compile_fullgraph = False
_can_record_outputs = {
"hidden_states": "DecoderLayer",
"attentions": "Attention",
}
class AyaVisionCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
pass
class AyaVisionModelOutputWithPast(LlavaModelOutputWithPast):
pass
class AyaVisionModel(LlavaModel):
# Unlike LLaVA, the model doesn't have to deal with Pixtral-style image states
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
"""
        Obtains image last hidden states from the vision tower and applies the multimodal projection.
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
vision_feature_layer (`Union[int, list[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
            image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}")
kwargs = {k: v for k, v in kwargs.items() if v is not None}
        # this is not memory efficient at all: `output_hidden_states=True` saves all the hidden states.
image_outputs = self.vision_tower(pixel_values, output_hidden_states=True, **kwargs)
# If we have one vision feature layer, return the corresponding hidden states,
# otherwise, select the hidden states of each feature layer and concatenate them
if isinstance(vision_feature_layer, int):
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
selected_image_feature = selected_image_feature[:, 1:]
else:
hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
# For default; crop CLS from each hidden state in the hidden state pool
if vision_feature_select_strategy == "default":
hs_pool = [hs[:, 1:] for hs in hs_pool]
selected_image_feature = torch.cat(hs_pool, dim=-1)
image_features = self.multi_modal_projector(selected_image_feature)
return image_features
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, AyaVisionModelOutputWithPast]:
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return AyaVisionModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
class AyaVisionForConditionalGeneration(LlavaForConditionalGeneration):
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
image_sizes: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, AyaVisionCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoProcessor, AyaVisionForConditionalGeneration
>>> import torch
>>> torch_device = "cuda:0"
>>> processor = AutoProcessor.from_pretrained("CohereForAI/aya-vision-8b", use_fast=True)
>>> model = AyaVisionForConditionalGeneration.from_pretrained("CohereForAI/aya-vision-8b", device_map=torch_device)
>>> messages = [
... {
... "role": "user",
... "content": [
... {
... "type": "image",
... "url": "https://pbs.twimg.com/media/Fx7YvfQWYAIp6rZ?format=jpg&name=medium",
... },
... {"type": "text", "text": "चित्र में लिखा पाठ क्या कहता है?"},
... ],
... }
... ]
>>> inputs = processor.apply_chat_template(
... messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", device=torch_device
... ).to(model.device)
>>> gen_tokens = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.3)
>>> processor.tokenizer.decode(gen_tokens[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
```"""
super().forward(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
labels=labels,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
image_sizes=image_sizes,
**kwargs,
)
__all__ = ["AyaVisionForConditionalGeneration", "AyaVisionPreTrainedModel", "AyaVisionModel"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/aya_vision/processing_aya_vision.py | src/transformers/models/aya_vision/processing_aya_vision.py | # coding=utf-8
# Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput, make_flat_list_of_images
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
class AyaVisionProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding_side": "left",
"padding": True,
"return_mm_token_type_ids": False,
},
"images_kwargs": {
"crop_to_patches": True,
},
}
class AyaVisionProcessor(ProcessorMixin):
r"""
    Constructs an AyaVision processor which wraps a [`AutoImageProcessor`] and
[`PretrainedTokenizerFast`] tokenizer into a single processor that inherits both the image processor and
tokenizer functionalities. See the [`~AyaVisionProcessor.__call__`] and [`~AyaVisionProcessor.decode`] for more information.
Args:
image_processor ([`AutoImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*, defaults to 28):
The size of image patches for tokenization.
img_size (`int`, *optional*, defaults to 364):
The size of the image to be tokenized. This should correspond to the size given to the image processor.
image_token (`str`, *optional*, defaults to `"<image>"`):
The token to be used to represent an image in the text.
downsample_factor (`int`, *optional*, defaults to 1):
The factor by which to scale the patch size.
start_of_img_token (`str`, *optional*, defaults to `"<|START_OF_IMG|>"`):
The token to be used to represent the start of an image in the text.
end_of_img_token (`str`, *optional*, defaults to `"<|END_OF_IMG|>"`):
The token to be used to represent the end of an image in the text.
img_patch_token (`str`, *optional*, defaults to `"<|IMG_PATCH|>"`):
The token to be used to represent an image patch in the text.
img_line_break_token (`str`, *optional*, defaults to `"<|IMG_LINE_BREAK|>"`):
The token to be used to represent a line break in the text.
tile_token (`str`, *optional*, defaults to `"TILE"`):
            The token used to mark the start of each local image tile in the text.
tile_global_token (`str`, *optional*, defaults to `"TILE_GLOBAL"`):
The token to be used to represent the cover image in the text.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(
self,
image_processor=None,
tokenizer=None,
patch_size: int = 28,
img_size: int = 364,
image_token="<image>", # set the default and let users change if they have peculiar special tokens in rare cases
downsample_factor: int = 1,
start_of_img_token="<|START_OF_IMG|>",
end_of_img_token="<|END_OF_IMG|>",
img_patch_token="<|IMG_PATCH|>",
img_line_break_token="<|IMG_LINE_BREAK|>",
tile_token="TILE",
tile_global_token="TILE_GLOBAL",
chat_template=None,
**kwargs,
):
super().__init__(image_processor, tokenizer, chat_template=chat_template)
self.image_token = image_token
self.patch_size = patch_size * downsample_factor
self.img_size = img_size
self.start_of_img_token = start_of_img_token
self.end_of_img_token = end_of_img_token
self.img_patch_token = img_patch_token
self.img_line_break_token = img_line_break_token
self.tile_token = tile_token
self.tile_global_token = tile_global_token
self.image_token_id = tokenizer.convert_tokens_to_ids(self.img_patch_token)
self.image_ids = tokenizer.convert_tokens_to_ids(
[img_patch_token, tile_token, tile_global_token, start_of_img_token, end_of_img_token]
)
def _prompt_split_image(self, num_patches):
"""
Create a structured string representation of image tokens
Args:
num_patches: Number of patches in the image
Returns:
String with appropriate image tokens
"""
img_patches_per_tile = (self.img_size // self.patch_size) ** 2
img_string = f"{self.start_of_img_token}"
if num_patches > 1:
for idx in range(1, num_patches):
img_string += f"{self.tile_token}_{idx}" + f"{self.img_patch_token}" * img_patches_per_tile
img_string += f"{self.tile_global_token}" + f"{self.img_patch_token}" * img_patches_per_tile
img_string += f"{self.end_of_img_token}"
return img_string
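    # Example (editor's note): with the signature defaults (img_size=364, patch_size=28, downsample_factor=1)
    # each tile contributes (364 // 28) ** 2 = 169 patch tokens, so for num_patches=2 the string is
    # <|START_OF_IMG|>, then TILE_1 followed by 169 <|IMG_PATCH|> tokens, then TILE_GLOBAL followed by
    # another 169 <|IMG_PATCH|> tokens, and finally <|END_OF_IMG|>.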
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
**kwargs: Unpack[AyaVisionProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text.
To prepare the vision inputs, this method forwards the `images` and `kwargs` arguments to
GotOcr2ImageProcessor's [`~GotOcr2ImageProcessor.__call__`] if `images` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if text is None:
raise ValueError("You have to specify text.")
output_kwargs = self._merge_kwargs(
AyaVisionProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if not isinstance(text, (list, tuple)):
text = [text]
# Process images
image_inputs = {}
if images is not None:
images = self.image_processor.fetch_images(images)
images = make_flat_list_of_images(images)
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
num_patches = image_inputs.pop("num_patches")
image_index = 0
processed_text = []
for prompt in text:
new_prompt = prompt
while "<image>" in new_prompt:
# Replace the image placeholder with structured image tokens
image_tokens = self._prompt_split_image(num_patches[image_index])
new_prompt = new_prompt.replace("<image>", image_tokens, 1)
image_index += 1
processed_text.append(new_prompt)
if image_index != len(images):
raise ValueError("Number of image placeholders in the prompt does not match the number of images.")
text = processed_text
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = AyaVisionProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
token_per_patch = (self.img_size // self.patch_size) ** 2
num_image_tokens = [
token_per_patch + 3 + sum(token_per_patch + 1 for _ in range(1, num_patches))
for num_patches in num_image_patches
] # Add +3 and +1 for BOI/EOI and image tile tokens
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
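    # Worked example (editor's note): under the constructor defaults (169 patch tokens per tile), an image
    # split into num_patches=2 tiles yields 169 + 3 + (169 + 1) = 342 placeholder tokens, which matches the
    # string built by `_prompt_split_image` (2 * 169 patch tokens plus the <|START_OF_IMG|>, <|END_OF_IMG|>,
    # TILE_1 and TILE_GLOBAL markers) when each marker tokenizes to a single id.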
__all__ = ["AyaVisionProcessor"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/aya_vision/configuration_aya_vision.py | src/transformers/models/aya_vision/configuration_aya_vision.py | # coding=utf-8
# Copyright 2025 Cohere team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AyaVision model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class AyaVisionConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`AyaVisionForConditionalGeneration`]. It is used to instantiate an
AyaVision model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of AyaVision.
e.g. [CohereForAI/aya-vision-8b](https://huggingface.co/CohereForAI/aya-vision-8b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Cohere2Config`):
The config object or dictionary of the text backbone.
vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to select the vision feature.
downsample_factor (`int`, *optional*, defaults to 2):
The downsample factor to apply to the vision features.
adapter_layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon value used for layer normalization in the adapter.
image_token_index (`int`, *optional*, defaults to 255036):
The image token index to encode the image prompt.
"""
model_type = "aya_vision"
attribute_map = {
"image_token_id": "image_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
vision_feature_select_strategy="full",
vision_feature_layer=-1,
downsample_factor=2,
adapter_layer_norm_eps=1e-6,
image_token_index=255036,
**kwargs,
):
self.image_token_index = image_token_index
self.downsample_factor = downsample_factor
self.adapter_layer_norm_eps = adapter_layer_norm_eps
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip_vision_model"](
hidden_size=1152,
intermediate_size=4304,
patch_size=14,
image_size=384,
num_hidden_layers=26,
num_attention_heads=14,
vision_use_head=False,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "cohere2")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["cohere2"]()
self.text_config = text_config
super().__init__(**kwargs)
__all__ = ["AyaVisionConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/aya_vision/__init__.py | src/transformers/models/aya_vision/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_aya_vision import *
from .modeling_aya_vision import *
from .processing_aya_vision import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/aya_vision/modeling_aya_vision.py | src/transformers/models/aya_vision/modeling_aya_vision.py | # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/aya_vision/modular_aya_vision.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_aya_vision.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 the Cohere Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Union
import torch
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput
from ...modeling_utils import PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.generic import check_model_inputs
from ..auto import AutoModel
from .configuration_aya_vision import AyaVisionConfig
class AyaVisionMultiModalProjector(nn.Module):
def __init__(self, config: AyaVisionConfig):
super().__init__()
self.config = config
self.downsample_factor = config.downsample_factor
self.alignment_intermediate_size = getattr(
config, "alignment_intermediate_size", config.text_config.hidden_size
)
self.layernorm = nn.LayerNorm(
config.vision_config.hidden_size * (config.downsample_factor**2), eps=config.adapter_layer_norm_eps
)
self.linear_1 = nn.Linear(
config.vision_config.hidden_size * (config.downsample_factor**2),
self.alignment_intermediate_size,
bias=True,
)
self.act = ACT2FN["silu"] # SwiGLU uses SiLU activation
# For SwiGLU, project down to half size since we split intermediate dim
self.linear_2 = nn.Linear(self.alignment_intermediate_size // 2, config.text_config.hidden_size, bias=True)
def forward(self, image_features):
image_features = self.pixel_shuffle(image_features)
image_features = self.layernorm(image_features)
hidden_states = self.linear_1(image_features)
# Split along last dimension and apply SwiGLU
x, gate = hidden_states.chunk(2, dim=-1)
hidden_states = self.act(gate) * x
hidden_states = self.linear_2(hidden_states)
return hidden_states
def pixel_shuffle(self, image_features): # B, S, D
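# Space-to-depth style downsampling: assumes a square grid of patches (seq_length is a perfect square)
# and merges neighboring tokens so the sequence length shrinks by downsample_factor**2 while the feature
# dimension grows by the same factor.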
batch_size, seq_length, feature_dim = image_features.shape
height = width = int(seq_length**0.5)
image_features = image_features.reshape(image_features.shape[0], width, height, -1)
channels = image_features.shape[-1]
image_features = image_features.reshape(
batch_size, width, int(height / self.downsample_factor), int(channels * self.downsample_factor)
)
image_features = image_features.permute(0, 2, 1, 3)
image_features = image_features.reshape(
batch_size, int(height / self.downsample_factor), int(width / self.downsample_factor), -1
)
image_features = image_features.permute(0, 2, 1, 3)
return image_features
@auto_docstring
class AyaVisionPreTrainedModel(PreTrainedModel):
config: AyaVisionConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = False
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": "DecoderLayer",
"attentions": "Attention",
}
@dataclass
@auto_docstring(
custom_intro="""
Base class for AyaVision causal language model (or autoregressive) outputs.
"""
)
class AyaVisionCausalLMOutputWithPast(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
Image hidden states of the model, produced by the vision encoder after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for AyaVision outputs, with hidden states and attentions.
"""
)
class AyaVisionModelOutputWithPast(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
Image hidden states of the model, produced by the vision encoder after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
@auto_docstring(
custom_intro="""
The AyaVision model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
class AyaVisionModel(AyaVisionPreTrainedModel):
_checkpoint_conversion_mapping = {
r"^language_model.model": "language_model",
}
def __init__(self, config: AyaVisionConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = AyaVisionMultiModalProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
"""
Obtains image last hidden states from the vision tower and applies multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
vision_feature_layer (`Union[int, list[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}")
kwargs = {k: v for k, v in kwargs.items() if v is not None}
# This is not memory efficient at all: `output_hidden_states=True` will save all the hidden states.
image_outputs = self.vision_tower(pixel_values, output_hidden_states=True, **kwargs)
# If we have one vision feature layer, return the corresponding hidden states,
# otherwise, select the hidden states of each feature layer and concatenate them
if isinstance(vision_feature_layer, int):
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
selected_image_feature = selected_image_feature[:, 1:]
else:
hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
# For the "default" strategy, crop the CLS token from each hidden state in the pool
if vision_feature_select_strategy == "default":
hs_pool = [hs[:, 1:] for hs in hs_pool]
selected_image_feature = torch.cat(hs_pool, dim=-1)
image_features = self.multi_modal_projector(selected_image_feature)
return image_features
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0] * image_features.shape[1]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, AyaVisionModelOutputWithPast]:
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
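# Scatter the projected image features into the embedding positions marked by the image placeholder tokens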
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return AyaVisionModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring(
custom_intro="""
The AyaVision model which consists of a vision backbone and a language model.
"""
)
class AyaVisionForConditionalGeneration(AyaVisionPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
r"^language_model.model": "model.language_model",
r"^vision_tower": "model.vision_tower",
r"^multi_modal_projector": "model.multi_modal_projector",
r"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: AyaVisionConfig):
super().__init__(config)
self.model = AyaVisionModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
return self.model.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
**kwargs,
)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
image_sizes: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, AyaVisionCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoProcessor, AyaVisionForConditionalGeneration
>>> import torch
>>> torch_device = "cuda:0"
>>> processor = AutoProcessor.from_pretrained("CohereForAI/aya-vision-8b", use_fast=True)
>>> model = AyaVisionForConditionalGeneration.from_pretrained("CohereForAI/aya-vision-8b", device_map=torch_device)
>>> messages = [
... {
... "role": "user",
... "content": [
... {
... "type": "image",
... "url": "https://pbs.twimg.com/media/Fx7YvfQWYAIp6rZ?format=jpg&name=medium",
... },
... {"type": "text", "text": "चित्र में लिखा पाठ क्या कहता है?"},
... ],
... }
... ]
>>> inputs = processor.apply_chat_template(
... messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", device=torch_device
... ).to(model.device)
>>> gen_tokens = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.3)
>>> processor.tokenizer.decode(gen_tokens[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
```"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
cache_position=cache_position,
image_sizes=image_sizes,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return AyaVisionCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
is_first_iteration=False,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
is_first_iteration=is_first_iteration,
**kwargs,
)
if is_first_iteration or not kwargs.get("use_cache", True):
# Pixel values are used only in the first iteration if available
# In subsequent iterations, they are already merged with text and cached
# NOTE: first iteration doesn't have to be prefill, it can be the first
# iteration with a question and cached system prompt (continue generate from cache)
model_inputs["pixel_values"] = pixel_values
return model_inputs
__all__ = ["AyaVisionForConditionalGeneration", "AyaVisionPreTrainedModel", "AyaVisionModel"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon_h1/configuration_falcon_h1.py | src/transformers/models/falcon_h1/configuration_falcon_h1.py | # coding=utf-8
# Copyright 2025 TII and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FalconH1 model configuration"""
from typing import Optional
from ...configuration_utils import PreTrainedConfig
from ...modeling_rope_utils import RopeParameters
from ...utils import logging
logger = logging.get_logger(__name__)
class FalconH1Config(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`FalconH1Model`]. It is used to instantiate a
FalconH1Model model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a configuration similar to that of the Falcon-H1 models released by the
[Technology Innovation Institute (TII)](https://huggingface.co/tiiuae).
The FalconH1Model is a hybrid architecture combining attention with [mamba2](https://github.com/state-spaces/mamba) mixers and SwiGLU MLPs.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 128000):
Vocabulary size of the FalconH1 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FalconH1Model`]
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
model has an output word embedding layer.
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
sequence may use a lot of memory so, setting `num_logits_to_keep=1` will reduce memory footprint
significantly.
pad_token_id (`int`, *optional*, defaults to 0):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
max_position_embeddings (`int`, *optional*, defaults to 8192):
Max cached sequence length for the model
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mamba_d_ssm (`int`, *optional*, defaults to 1024):
The dimension of the SSM state space latents.
mamba_n_heads (`int`, *optional*, defaults to 128):
The number of mamba heads used in the v2 implementation.
mamba_d_head (`int`, *optional*, defaults to `"auto"`):
Head embedding dimension size
mamba_n_groups (`int`, *optional*, defaults to 1):
The number of the mamba groups used in the v2 implementation.
mamba_d_state (`int`, *optional*, defaults to 256):
The dimension of the mamba state space latents
mamba_d_conv (`int`, *optional*, defaults to 4):
The size of the mamba convolution kernel
mamba_expand (`int`, *optional*, defaults to 2):
Expanding factor (relative to hidden_size) used to determine the mamba intermediate size
mamba_chunk_size (`int`, *optional*, defaults to 256):
The chunks in which to break the sequence when doing prefill/training
mamba_conv_bias (`bool`, *optional*, defaults to `True`):
Flag indicating whether or not to use bias in the convolution layer of the mamba mixer block.
mamba_proj_bias (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the mamba mixer block
mamba_norm_before_gate (`bool`, *optional*, defaults to `True`):
Whether to use RMSNorm before the gate in the Mamba block
mamba_rms_norm (`bool`, *optional*, defaults to `False`):
Whether to use RMSNorm instead of LayerNorm in the Mamba block
projectors_bias (`bool`, *optional*, defaults to `False`):
Flag indicating whether or not to use bias in the input and output projections (["in_proj", "out_proj"]) of the attention block
rope_parameters (`RopeParameters` or `dict[str, RopeParameters]`, *optional*):
The RoPE parameters, including any scaling configuration, used for the RoPE embeddings. If `None`, no scaling is applied.
lm_head_multiplier (`float`, *optional*, defaults to 1.0):
The multiplier for the LM head. This is used to scale the output of the LM head.
embedding_multiplier (`float`, *optional*, defaults to 1.0):
The multiplier for the embedding layer. This is used to scale the output of the embedding layer.
mlp_multipliers (`list[float]`, *optional*):
The multipliers for the MLP layers. This is used to scale the output of the MLP layers. The first value is
the multiplier of gate layer, the second value is the multiplier of the down_proj layer.
key_multiplier (`float`, *optional*):
The multiplier for the key layer. This is used to scale the output of the key layer.
attention_out_multiplier (`float`, *optional*):
The multiplier for the attention output layer. This is used to scale the output of the attention output
attention_in_multiplier (`float`, *optional*):
The multiplier for the attention input layer. This is used to scale the output of the attention input layer.
ssm_multipliers (`list[float]`, *optional*):
The multipliers for the SSM layers. This is used to scale the output of the SSM layers.
ssm_in_multiplier (`float`, *optional*):
The multiplier for the SSM input layer. This is used to scale the output of the SSM input layer.
ssm_out_multiplier (`float`, *optional*):
The multiplier for the SSM output layer. This is used to scale the output of the SSM output layer.
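
Example:

```python
>>> from transformers import FalconH1Config

>>> # A minimal sketch: initialize a Falcon-H1 style configuration with the defaults documented above
>>> configuration = FalconH1Config()

>>> # The Mamba head dimension is derived from mamba_d_ssm // mamba_n_heads when left on "auto"
>>> configuration.mamba_d_head
8
```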
"""
model_type = "falcon_h1"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size: Optional[int] = 128000,
tie_word_embeddings: Optional[bool] = False,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 14336,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 8,
hidden_act: Optional[str] = "silu",
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[float] = 1e-5,
use_cache: Optional[bool] = True,
num_logits_to_keep: Optional[int] = 1,
pad_token_id: Optional[int] = 0,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
max_position_embeddings: Optional[int] = 8192,
attention_dropout: Optional[float] = 0.0,
mamba_d_ssm: Optional[int] = 1024,
mamba_n_heads: Optional[int] = 128,
mamba_d_head: Optional[str] = "auto",
mamba_n_groups: Optional[int] = 1,
mamba_d_state: Optional[int] = 256,
mamba_d_conv: Optional[int] = 4,
mamba_expand: Optional[int] = 2,
mamba_chunk_size: Optional[int] = 256,
mamba_conv_bias: Optional[bool] = True,
mamba_proj_bias: Optional[bool] = False,
mamba_norm_before_gate: Optional[bool] = True,
mamba_rms_norm: Optional[bool] = False,
projectors_bias: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
lm_head_multiplier: Optional[float] = 1.0,
embedding_multiplier: Optional[float] = 1.0,
mlp_multipliers: Optional[list[float]] = None,
key_multiplier: Optional[float] = None,
attention_out_multiplier: Optional[float] = None,
attention_in_multiplier: Optional[float] = None,
ssm_multipliers: Optional[list[float]] = None,
ssm_in_multiplier: Optional[float] = None,
ssm_out_multiplier: Optional[float] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.attention_dropout = attention_dropout
self.attention_bias = False
self.mlp_bias = False
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.num_logits_to_keep = num_logits_to_keep
self.projectors_bias = projectors_bias
mamba_intermediate = mamba_expand * hidden_size if mamba_d_ssm is None else mamba_d_ssm
if mamba_intermediate % mamba_n_heads != 0:
raise ValueError("mamba_n_heads must divide mamba_expand * hidden_size")
# for the mamba_v2, must satisfy the following
if mamba_d_head == "auto":
mamba_d_head = mamba_intermediate // mamba_n_heads
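# e.g. with the defaults (mamba_d_ssm=1024, mamba_n_heads=128) this resolves to mamba_d_head = 8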
if mamba_d_head * mamba_n_heads != mamba_intermediate:
raise ValueError("The dimensions for the Mamba head state do not match the model intermediate_size")
self.mamba_d_ssm = mamba_d_ssm
self.mamba_n_heads = mamba_n_heads
self.mamba_d_head = mamba_d_head
self.mamba_n_groups = mamba_n_groups
self.mamba_d_state = mamba_d_state
self.mamba_d_conv = mamba_d_conv
self.mamba_expand = mamba_expand
self.mamba_chunk_size = mamba_chunk_size
self.mamba_conv_bias = mamba_conv_bias
self.mamba_proj_bias = mamba_proj_bias
self.mamba_norm_before_gate = mamba_norm_before_gate
self.mamba_rms_norm = mamba_rms_norm
self.lm_head_multiplier = lm_head_multiplier
self.embedding_multiplier = embedding_multiplier
if mlp_multipliers is not None:
self.mlp_multipliers = mlp_multipliers
else:
self.mlp_multipliers = [1.0, 1.0]
if attention_out_multiplier is not None:
self.attention_out_multiplier = attention_out_multiplier
else:
self.attention_out_multiplier = 1.0
if attention_in_multiplier is not None:
self.attention_in_multiplier = attention_in_multiplier
else:
self.attention_in_multiplier = 1.0
if key_multiplier is not None:
self.key_multiplier = key_multiplier
else:
self.key_multiplier = 1.0
if ssm_multipliers is not None:
self.ssm_multipliers = ssm_multipliers
else:
self.ssm_multipliers = [1.0, 1.0, 1.0, 1.0, 1.0]
if ssm_in_multiplier is not None:
self.ssm_in_multiplier = ssm_in_multiplier
else:
self.ssm_in_multiplier = 1.0
if ssm_out_multiplier is not None:
self.ssm_out_multiplier = ssm_out_multiplier
else:
self.ssm_out_multiplier = 1.0
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
@property
def layers_block_type(self):
return ["attention" for i in range(self.num_hidden_layers)]
__all__ = ["FalconH1Config"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon_h1/modeling_falcon_h1.py | src/transformers/models/falcon_h1/modeling_falcon_h1.py | # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/falcon_h1/modular_falcon_h1.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_falcon_h1.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Technology Innovation Institute and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable
from typing import Any, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from transformers.activations import ACT2FN
from ... import initialization as init
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub, use_kernel_func_from_hub, use_kernelized_func
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
from ...utils.generic import maybe_autocast
from ...utils.import_utils import is_causal_conv1d_available, is_mamba_2_ssm_available
from .configuration_falcon_h1 import FalconH1Config
if is_mamba_2_ssm_available():
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
else:
selective_state_update = None
if is_causal_conv1d_available():
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
else:
causal_conv1d_update, causal_conv1d_fn = None, None
logger = logging.get_logger(__name__)
class FalconHybridMambaAttentionDynamicCache:
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape of each tensor depends on the layer type:
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
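
Example (a minimal sketch; during generation this cache is normally built for you and passed around as `past_key_values`):

```python
>>> import torch
>>> from transformers import FalconH1Config
>>> from transformers.models.falcon_h1.modeling_falcon_h1 import FalconHybridMambaAttentionDynamicCache

>>> config = FalconH1Config(num_hidden_layers=2, hidden_size=64, num_attention_heads=4, num_key_value_heads=2)
>>> cache = FalconHybridMambaAttentionDynamicCache(
...     config, batch_size=1, dtype=torch.float32, devices=["cpu"] * config.num_hidden_layers
... )
>>> cache.get_seq_length()
0
```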
"""
is_compileable = False
def __init__(
self,
config: FalconH1Config,
batch_size: int,
dtype: torch.dtype = torch.float16,
devices: Optional[list[str]] = None,
):
self.seqlen_offset = 0
self.dtype = dtype
self.has_previous_state = False
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = (
config.mamba_d_ssm if config.mamba_d_ssm is not None else int(config.mamba_expand * config.hidden_size)
)
self.conv_states = {
i: torch.zeros(
batch_size,
self.intermediate_size + 2 * config.mamba_n_groups * config.mamba_d_state,
self.conv_kernel_size,
device=devices[i],
dtype=dtype,
)
for i in range(config.num_hidden_layers)
}
self.ssm_states = {
i: torch.zeros(
batch_size,
config.mamba_n_heads,
config.mamba_d_head,
config.mamba_d_state,
device=devices[i],
dtype=dtype,
)
for i in range(config.num_hidden_layers)
}
self.transformer_layers = []
for i in range(config.num_hidden_layers):
self.transformer_layers.append(i)
self.key_cache: list[torch.Tensor] = []
self.value_cache: list[torch.Tensor] = []
def __len__(self):
return len(self.key_cache)
def __getitem__(self, layer_idx):
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[dict[str, Any]] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
Parameters:
key_states (`torch.Tensor`):
The new key states to cache.
value_states (`torch.Tensor`):
The new value states to cache.
layer_idx (`int`):
The index of the layer to cache the states for.
cache_kwargs (`dict[str, Any]`, `optional`):
Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
Return:
A tuple containing the updated key and value states.
"""
# Update the cache
if len(self.key_cache) <= layer_idx:
# There may be skipped layers, fill them with empty lists
for _ in range(len(self.key_cache), layer_idx):
self.key_cache.append([])
self.value_cache.append([])
self.key_cache.append(key_states)
self.value_cache.append(value_states)
elif len(self.key_cache[layer_idx]) == 0: # fills previously skipped layers; checking for tensor causes errors
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
if self.get_seq_length() > 0:
for layer_idx in range(len(self.key_cache)):
device = self.key_cache[layer_idx].device
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.value_cache[layer_idx].device
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.conv_states[layer_idx].device
self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
device = self.ssm_states[layer_idx].device
self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
"""Return the length and offset of the cache, used to generate the mask"""
kv_offset = 0
query_length = cache_position.shape[0]
kv_length = self.get_seq_length(layer_idx) + query_length
return kv_length, kv_offset
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
# take any layer that contains cache and not empty tensor
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].shape[-1] == 0:
return 0
return self.key_cache[layer_idx].shape[-2]
def update_conv_state(
self,
layer_idx: int,
new_conv_state: torch.Tensor,
cache_position: torch.LongTensor,
) -> torch.Tensor:
conv_state = self.conv_states[layer_idx]
cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
if len(cache_position) > 1:
conv_state[:, :, :] = new_conv_state.to(conv_state.device)
else:
conv_state[:, :, -1] = new_conv_state[:, :, -1].to(conv_state.device)
self.conv_states[layer_idx].zero_()
self.conv_states[layer_idx] += conv_state
return self.conv_states[layer_idx]
def reset(self):
# conv_states and ssm_states are dicts keyed by layer index, so reset each tensor in place
for layer_idx in self.conv_states:
    self.conv_states[layer_idx].zero_()
    self.ssm_states[layer_idx].zero_()
class FalconH1RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: FalconH1Config, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
@staticmethod
def compute_default_rope_parameters(
config: Optional[FalconH1Config] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with maybe_autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
@use_kernelized_func(apply_rotary_pos_emb)
class FalconH1Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: FalconH1Config, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.key_multiplier = config.key_multiplier
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) * self.key_multiplier
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class FalconH1RMSNormGated(torch.nn.Module):
def __init__(self, hidden_size, eps=1e-6, n_groups=1, norm_before_gate=True):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
self.n_groups = n_groups
self.norm_before_gate = norm_before_gate
def forward(self, hidden_states, gate=None):
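# Depending on `norm_before_gate`, the SiLU(gate) multiplication is applied either before the
# grouped RMS normalization (norm_before_gate=False) or after it (norm_before_gate=True).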
input_dtype = hidden_states.dtype
if not self.norm_before_gate and gate is not None:
hidden_states = hidden_states * F.silu(gate.to(torch.float32))
if len(hidden_states.shape) == 3:
batch_size, seq_len, dim = hidden_states.shape
else:
batch_size, dim = hidden_states.shape
seq_len = 1
hidden_states = hidden_states.to(torch.float32)
hidden_states = hidden_states.view(batch_size, seq_len, self.n_groups, int(dim // self.n_groups))
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = self.weight.view(self.n_groups, int(dim // self.n_groups)) * hidden_states
hidden_states = hidden_states.view(batch_size, seq_len, dim)
if seq_len == 1:
hidden_states = hidden_states.squeeze(1)
if self.norm_before_gate and gate is not None:
hidden_states = hidden_states * F.silu(gate.to(torch.float32))
return hidden_states.to(input_dtype)
# Helper methods for segment sum computation
def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
"""
Padding x tensor with `pad_size` on the seq_len dim (dim=1)
Assumes that we only have tensors of either size 4 or 3
"""
pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)
return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)
def reshape_into_chunks(input_tensor, pad_size, chunk_size):
"""
Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and
simultaneously splitting it into chunk sequences.
Assumes that we only have tensors of either size 4 or 3
"""
# [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
input_tensor = pad_tensor_by_size(input_tensor, pad_size)
if len(input_tensor.shape) == 3:
# [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
else:
# [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size]
return input_tensor.reshape(
input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
)
def segment_sum(input_tensor):
"""
More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
"""
chunk_size = input_tensor.size(-1)
# 1. expand input tensor to have an additional dimension and repeat along that dimension
# [..., chunk_size] -> [..., chunk_size, chunk_size]
input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
# 2. create a lower triangular mask with the diagonal set to 0 to 0 out elements above diag
mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
input_tensor = input_tensor.masked_fill(~mask, 0)
# 3. compute actual cumsum
tensor_segsum = torch.cumsum(input_tensor, dim=-2)
# 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
return tensor_segsum
def apply_mask_to_padding_states(hidden_states, attention_mask):
"""
Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
"""
# NOTE: attention mask is a 2D boolean tensor
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
return hidden_states
is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update))
# Adapted from transformers.models.mamba2.modeling_mamba2.Mamba2Mixer
class FalconH1Mixer(nn.Module):
"""
FalconH1Mixer is essentially the classic Mamba2 mixer but differs in two ways:
- Users can pass a custom intermediate_size through `config.mamba_d_ssm`
- The use of the gated RMS normalization layer is optional
"""
def __init__(self, config: FalconH1Config, layer_idx: int):
super().__init__()
self.num_heads = config.mamba_n_heads
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = (
int(config.mamba_expand * self.hidden_size) if config.mamba_d_ssm is None else config.mamba_d_ssm
)
self.layer_idx = layer_idx
self.use_conv_bias = config.mamba_conv_bias
self.activation = config.hidden_act
self.act = ACT2FN[config.hidden_act]
self.use_bias = config.mamba_proj_bias
self.layer_norm_epsilon = config.rms_norm_eps
self.groups_time_state_size = config.mamba_n_groups * self.ssm_state_size
self.n_groups = config.mamba_n_groups
self.head_dim = config.mamba_d_head
self.chunk_size = config.mamba_chunk_size
# FIXME:
self.time_step_limit = (0.0, float("inf"))
self.time_step_min = 0.001
self.time_step_max = 0.1
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
self.conv1d = nn.Conv1d(
in_channels=self.conv_dim,
out_channels=self.conv_dim,
bias=config.mamba_conv_bias,
kernel_size=self.conv_kernel_size,
groups=self.conv_dim,
padding=self.conv_kernel_size - 1,
)
# projection of the input hidden states
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
self.in_proj = nn.Linear(
self.hidden_size,
projection_size,
bias=self.use_bias,
)
# selective projection used to make dt, B and C input dependent
# time step projection (discretization)
# instantiate once and copy inv_dt in init_weights of PretrainedModel
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
# S4D real initialization. These are not discretized!
# The core idea is to load them, compute the discrete states, then write back the updated state. This keeps the memory bounded.
A = torch.arange(1, self.num_heads + 1)
self.A_log = nn.Parameter(torch.log(A))
self.mamba_rms_norm = config.mamba_rms_norm
if self.mamba_rms_norm:
self.norm = FalconH1RMSNormGated(
self.intermediate_size,
eps=self.layer_norm_epsilon,
n_groups=self.n_groups,
norm_before_gate=config.mamba_norm_before_gate,
)
self.D = nn.Parameter(torch.ones(self.num_heads))
self.out_proj = nn.Linear(self.intermediate_size, config.hidden_size, bias=config.projectors_bias)
if not is_fast_path_available:
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
" is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d"
)
else:
logger.warning_once("The fast path for FalconH1 will be used when running the model on a GPU")
self.zxbcdt_multipliers = config.ssm_multipliers
self.ssm_in_multiplier = config.ssm_in_multiplier
def cuda_kernels_forward(
self,
hidden_states: torch.Tensor,
cache_params: Optional[FalconHybridMambaAttentionDynamicCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
# 1. Gated MLP's linear projection
hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
# Add Multipliers
hidden_states = hidden_states * self.ssm_in_multiplier
projected_states = self.in_proj(hidden_states)
projected_states = projected_states * self.mup_vector # ADD Mup Multipliers
d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
# Set up dimensions for reshapes later
batch_size, seq_len, _ = hidden_states.shape
groups_time_state_size = self.n_groups * self.ssm_state_size
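# The single-token (decode) fast path below is only taken once a prefill has already populated the
# conv/ssm caches for this batch size.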
use_precomputed_states = (
cache_params is not None
and cache_params.has_previous_state
and seq_len == 1
and cache_params.conv_states[self.layer_idx].shape[0]
== cache_params.ssm_states[self.layer_idx].shape[0]
== batch_size
and cache_position is not None
and cache_position[0] > 0
)
# getting projected states from cache if it exists
if use_precomputed_states:
d_mlp = (projected_states.squeeze(1).shape[-1] - d_to_remove) // 2
z0, x0, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
)
# 2. Convolution sequence transformation
hidden_states_B_C = causal_conv1d_update(
hidden_states_B_C,
cache_params.conv_states[self.layer_idx],
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.activation,
)
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
dim=-1,
)
# 3. SSM transformation
A = -torch.exp(self.A_log.float()) # (nheads,)
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
D = self.D[:, None, ...].expand(-1, self.head_dim)
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
hidden_states = selective_state_update(
cache_params.ssm_states[self.layer_idx],
hidden_states_reshaped,
dt,
A,
B,
C,
D,
z=gate.view(batch_size, self.num_heads, self.head_dim) if not self.mamba_rms_norm else None,
dt_bias=dt_bias,
dt_softplus=True,
)
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
if self.mamba_rms_norm:
hidden_states = self.norm(hidden_states, gate)
if d_mlp > 0:
hidden_states = torch.cat([F.silu(z0) * x0, hidden_states], dim=-1)
# 4. Final linear projection
out = self.out_proj(hidden_states[:, None, ...])
# Fused calculations or step by step if no initialized cache is found
else:
A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}
# 2-4. Fused kernel for conv1d, SSM, and the final projection
if self.training and cache_params is None:
out = mamba_split_conv1d_scan_combined(
projected_states,
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.dt_bias,
A,
D=self.D,
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon_h1/convert_mamba_ssm_checkpoint.py | src/transformers/models/falcon_h1/convert_mamba_ssm_checkpoint.py | # coding=utf-8
# Copyright 2025 TII and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script can be used to convert checkpoints provided in the `mamba_ssm` library into the format provided in HuggingFace `transformers`. It depends on the `mamba2_ssm` package to be installed."""
import argparse
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, FalconH1Config, FalconH1ForCausalLM
CONVERSION_MAPPING = {
"backbone": "model",
"embeddings": "embed_tokens",
"mixer.": "",
"mixer_ssm": "mamba",
"mixer_attn": "self_attn",
"mlp.": "feed_forward.",
"mlp_norm": "pre_ff_layernorm",
"ssm_proj": "mamba.in_proj",
"attn_out_proj": "o_proj",
".norm.": ".input_layernorm.",
".mamba.input_layernorm.": ".mamba.norm.",
".ssm_out_proj.": ".mamba.out_proj.",
"norm_f": "final_layernorm",
}
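# Illustrative sketch of how the mapping is applied (the checkpoint key below is hypothetical):
# every entry acts as a plain substring replacement, mirroring the loop in `convert_falcon_h1_to_hf`.
# >>> key = "backbone.layers.0.mixer_ssm.A_log"
# >>> for old, new in CONVERSION_MAPPING.items():
# ...     if old in key:
# ...         key = key.replace(old, new)
# >>> key
# 'model.layers.0.mamba.A_log'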
def convert_falcon_h1_to_hf(input_model_path, output_path):
tokenizer = AutoTokenizer.from_pretrained(input_model_path)
model = AutoModelForCausalLM.from_pretrained(input_model_path, dtype=torch.bfloat16, trust_remote_code=True)
intermediate_size = int(model.config.expansion_factor * model.config.hidden_size)
if intermediate_size % 2 != 0:
intermediate_size = intermediate_size + (intermediate_size % 2)
new_config = FalconH1Config(
vocab_size=model.config.vocab_size,
tie_word_embeddings=model.config.tie_word_embeddings,
hidden_size=model.config.hidden_size,
intermediate_size=intermediate_size,
mamba_d_state=model.config.state_size,
num_hidden_layers=model.config.num_hidden_layers,
mamba_use_mlp=model.config.use_mlp,
rms_norm_eps=model.config.layer_norm_epsilon,
pad_token_id=model.config.pad_token_id,
eos_token_id=model.config.eos_token_id,
mamba_expand=model.config.expand,
mamba_d_conv=model.config.conv_kernel,
mamba_n_groups=model.config.n_groups,
mamba_n_heads=model.config.num_heads,
mamba_norm_before_gate=model.config.norm_before_gate,
mamba_rms_norm=model.config.rms_norm,
mamba_d_ssm=model.config.d_ssm,
attention_bias=model.config.use_bias,
projectors_bias=model.config.use_bias,
mamba_conv_bias=model.config.use_conv_bias,
hidden_act=model.config.hidden_act,
use_cache=model.config.use_cache,
mamba_chunk_size=model.config.chunk_size,
num_attention_heads=model.config.num_heads_mha,
num_key_value_heads=model.config.num_key_value_heads,
head_dim=model.config.head_dim_mha,
lm_head_multiplier=model.config.lm_head_multiplier,
embedding_multiplier=model.config.embedding_multiplier,
mlp_multipliers=model.config.mlp_multipliers,
key_multiplier=model.config.key_multiplier,
attention_out_multiplier=model.config.attention_out_multiplier,
attention_in_multiplier=model.config.attention_in_multiplier,
ssm_multipliers=model.config.ssm_multipliers,
ssm_in_multiplier=model.config.ssm_in_multiplier,
ssm_out_multiplier=model.config.ssm_out_multiplier,
rope_theta=model.config.rope_theta,
)
old_state_dict = model.state_dict()
new_state_dict = {}
for old_key, old_value in old_state_dict.items():
new_key = old_key
for conversion_key, conversion_value in CONVERSION_MAPPING.items():
if conversion_key in old_key:
new_key = new_key.replace(conversion_key, conversion_value)
if "mamba.input_layernorm" in new_key:
new_key = new_key.replace("mamba.input_layernorm", "mamba.norm")
# Special processing for attention layers
if "self_attn.attn_proj" in new_key:
num_heads = new_config.num_attention_heads
num_kv_heads = new_config.num_key_value_heads
head_dim = new_config.head_dim
q_proj, k_proj, v_proj = old_value.split(
[
num_heads * head_dim,
num_kv_heads * head_dim,
num_kv_heads * head_dim,
],
dim=0,
)
new_state_dict[new_key.replace("attn_proj", "q_proj")] = q_proj
new_state_dict[new_key.replace("attn_proj", "k_proj")] = k_proj
new_state_dict[new_key.replace("attn_proj", "v_proj")] = v_proj
else:
new_state_dict[new_key] = old_value
with torch.device("meta"):
new_model = FalconH1ForCausalLM(new_config)
del model
new_model.load_state_dict(new_state_dict, strict=True, assign=True)
new_model.save_pretrained(output_path)
tokenizer.save_pretrained(output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--mamba_ssm_checkpoint_directory",
type=str,
required=True,
help="Path to a directory containing the `pytorch_model.bin` mamba_ssm checkpoint file to be converted.",
)
parser.add_argument(
"-o", "--output_dir", type=str, required=True, help="Path to directory to save the converted output model to."
)
args = parser.parse_args()
convert_falcon_h1_to_hf(
args.mamba_ssm_checkpoint_directory,
args.output_dir,
)
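# Example invocation (the paths below are placeholders, not taken from the original script):
#   python convert_mamba_ssm_checkpoint.py -i /path/to/mamba_ssm_checkpoint -o /path/to/output_dir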
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon_h1/modular_falcon_h1.py | src/transformers/models/falcon_h1/modular_falcon_h1.py | # coding=utf-8
# Copyright 2025 Technology Innovation Institute and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch FalconH1 model."""
from collections.abc import Callable
from typing import Any, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from transformers.activations import ACT2FN
from transformers.models.jamba.modeling_jamba import HybridMambaAttentionDynamicCache
from transformers.models.llama.modeling_llama import (
LlamaAttention,
LlamaForCausalLM,
LlamaMLP,
LlamaRMSNorm,
LlamaRotaryEmbedding,
apply_rotary_pos_emb,
eager_attention_forward,
)
from transformers.models.mamba2.modeling_mamba2 import (
MambaRMSNormGated,
apply_mask_to_padding_states,
pad_tensor_by_size,
reshape_into_chunks,
segment_sum,
)
from ... import initialization as init
from ...cache_utils import Cache
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
from ...utils.import_utils import is_causal_conv1d_available, is_mamba_2_ssm_available
from .configuration_falcon_h1 import FalconH1Config
if is_mamba_2_ssm_available():
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
else:
selective_state_update = None
if is_causal_conv1d_available():
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
else:
causal_conv1d_update, causal_conv1d_fn = None, None
is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update))
logger = logging.get_logger(__name__)
class FalconHybridMambaAttentionDynamicCache(HybridMambaAttentionDynamicCache):
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors; the expected shape of each tensor is as follows.
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
def __init__(
self,
config: FalconH1Config,
batch_size: int,
dtype: torch.dtype = torch.float16,
devices: Optional[list[str]] = None,
):
self.seqlen_offset = 0
self.dtype = dtype
self.has_previous_state = False
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = (
config.mamba_d_ssm if config.mamba_d_ssm is not None else int(config.mamba_expand * config.hidden_size)
)
self.conv_states = {
i: torch.zeros(
batch_size,
self.intermediate_size + 2 * config.mamba_n_groups * config.mamba_d_state,
self.conv_kernel_size,
device=devices[i],
dtype=dtype,
)
for i in range(config.num_hidden_layers)
}
self.ssm_states = {
i: torch.zeros(
batch_size,
config.mamba_n_heads,
config.mamba_d_head,
config.mamba_d_state,
device=devices[i],
dtype=dtype,
)
for i in range(config.num_hidden_layers)
}
self.transformer_layers = []
for i in range(config.num_hidden_layers):
self.transformer_layers.append(i)
self.key_cache: list[torch.Tensor] = []
self.value_cache: list[torch.Tensor] = []
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[dict[str, Any]] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
Parameters:
key_states (`torch.Tensor`):
The new key states to cache.
value_states (`torch.Tensor`):
The new value states to cache.
layer_idx (`int`):
The index of the layer to cache the states for.
cache_kwargs (`dict[str, Any]`, `optional`):
Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
Return:
A tuple containing the updated key and value states.
"""
# Update the cache
if len(self.key_cache) <= layer_idx:
# There may be skipped layers, fill them with empty lists
for _ in range(len(self.key_cache), layer_idx):
self.key_cache.append([])
self.value_cache.append([])
self.key_cache.append(key_states)
self.value_cache.append(value_states)
elif len(self.key_cache[layer_idx]) == 0: # fills previously skipped layers; checking for tensor causes errors
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def update_conv_state(
self,
layer_idx: int,
new_conv_state: torch.Tensor,
cache_position: torch.LongTensor,
) -> torch.Tensor:
conv_state = self.conv_states[layer_idx]
cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
if len(cache_position) > 1:
conv_state[:, :, :] = new_conv_state.to(conv_state.device)
else:
conv_state[:, :, -1] = new_conv_state[:, :, -1].to(conv_state.device)
self.conv_states[layer_idx].zero_()
self.conv_states[layer_idx] += conv_state
return self.conv_states[layer_idx]
def reset(self):
        # `conv_states` and `ssm_states` are dicts keyed by layer index, so zero each tensor individually
        for layer_idx in self.conv_states:
            self.conv_states[layer_idx].zero_()
            self.ssm_states[layer_idx].zero_()
class FalconH1RotaryEmbedding(LlamaRotaryEmbedding):
pass
class FalconH1Attention(LlamaAttention):
def __init__(self, config: FalconH1Config, layer_idx: int):
super().__init__(config, layer_idx)
self.key_multiplier = config.key_multiplier
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) * self.key_multiplier
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class FalconH1RMSNormGated(MambaRMSNormGated):
def __init__(self, hidden_size, eps=1e-6, n_groups=1, norm_before_gate=True):
super().__init__(hidden_size=hidden_size, eps=eps)
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
self.n_groups = n_groups
self.norm_before_gate = norm_before_gate
def forward(self, hidden_states, gate=None):
input_dtype = hidden_states.dtype
if not self.norm_before_gate and gate is not None:
hidden_states = hidden_states * F.silu(gate.to(torch.float32))
if len(hidden_states.shape) == 3:
batch_size, seq_len, dim = hidden_states.shape
else:
batch_size, dim = hidden_states.shape
seq_len = 1
hidden_states = hidden_states.to(torch.float32)
hidden_states = hidden_states.view(batch_size, seq_len, self.n_groups, int(dim // self.n_groups))
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = self.weight.view(self.n_groups, int(dim // self.n_groups)) * hidden_states
hidden_states = hidden_states.view(batch_size, seq_len, dim)
if seq_len == 1:
hidden_states = hidden_states.squeeze(1)
if self.norm_before_gate and gate is not None:
hidden_states = hidden_states * F.silu(gate.to(torch.float32))
return hidden_states.to(input_dtype)
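# Minimal usage sketch for the gated norm (shape check only; the inputs are made up, not from the model):
# >>> norm = FalconH1RMSNormGated(hidden_size=8, n_groups=2, norm_before_gate=True)
# >>> x, gate = torch.randn(2, 3, 8), torch.randn(2, 3, 8)
# >>> norm(x, gate).shape
# torch.Size([2, 3, 8])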
# Adapted from transformers.models.mamba2.modeling_mamba2.Mamba2Mixer
class FalconH1Mixer(nn.Module):
"""
    FalconH1Mixer is similar to the classic Mamba2 mixer but differs in two ways:
- Users can pass custom intermediate_size through `config.mamba_d_ssm`
- The use of gated RMS normalization layer is optional
"""
def __init__(self, config: FalconH1Config, layer_idx: int):
super().__init__()
self.num_heads = config.mamba_n_heads
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = (
int(config.mamba_expand * self.hidden_size) if config.mamba_d_ssm is None else config.mamba_d_ssm
)
self.layer_idx = layer_idx
self.use_conv_bias = config.mamba_conv_bias
self.activation = config.hidden_act
self.act = ACT2FN[config.hidden_act]
self.use_bias = config.mamba_proj_bias
self.layer_norm_epsilon = config.rms_norm_eps
self.groups_time_state_size = config.mamba_n_groups * self.ssm_state_size
self.n_groups = config.mamba_n_groups
self.head_dim = config.mamba_d_head
self.chunk_size = config.mamba_chunk_size
# FIXME:
self.time_step_limit = (0.0, float("inf"))
self.time_step_min = 0.001
self.time_step_max = 0.1
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
self.conv1d = nn.Conv1d(
in_channels=self.conv_dim,
out_channels=self.conv_dim,
bias=config.mamba_conv_bias,
kernel_size=self.conv_kernel_size,
groups=self.conv_dim,
padding=self.conv_kernel_size - 1,
)
# projection of the input hidden states
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
self.in_proj = nn.Linear(
self.hidden_size,
projection_size,
bias=self.use_bias,
)
        # selective projection used to make dt, B and C input dependent
# time step projection (discretization)
# instantiate once and copy inv_dt in init_weights of PretrainedModel
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
# S4D real initialization. These are not discretized!
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
A = torch.arange(1, self.num_heads + 1)
self.A_log = nn.Parameter(torch.log(A))
self.mamba_rms_norm = config.mamba_rms_norm
if self.mamba_rms_norm:
self.norm = FalconH1RMSNormGated(
self.intermediate_size,
eps=self.layer_norm_epsilon,
n_groups=self.n_groups,
norm_before_gate=config.mamba_norm_before_gate,
)
self.D = nn.Parameter(torch.ones(self.num_heads))
self.out_proj = nn.Linear(self.intermediate_size, config.hidden_size, bias=config.projectors_bias)
if not is_fast_path_available:
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
" is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d"
)
else:
logger.warning_once("The fast path for FalconH1 will be used when running the model on a GPU")
self.zxbcdt_multipliers = config.ssm_multipliers
self.ssm_in_multiplier = config.ssm_in_multiplier
def cuda_kernels_forward(
self,
hidden_states: torch.Tensor,
cache_params: Optional[FalconHybridMambaAttentionDynamicCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
# 1. Gated MLP's linear projection
hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
# Add Multipliers
hidden_states = hidden_states * self.ssm_in_multiplier
projected_states = self.in_proj(hidden_states)
projected_states = projected_states * self.mup_vector # ADD Mup Multipliers
d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
# Set up dimensions for reshapes later
batch_size, seq_len, _ = hidden_states.shape
groups_time_state_size = self.n_groups * self.ssm_state_size
use_precomputed_states = (
cache_params is not None
and cache_params.has_previous_state
and seq_len == 1
and cache_params.conv_states[self.layer_idx].shape[0]
== cache_params.ssm_states[self.layer_idx].shape[0]
== batch_size
and cache_position is not None
and cache_position[0] > 0
)
# getting projected states from cache if it exists
if use_precomputed_states:
d_mlp = (projected_states.squeeze(1).shape[-1] - d_to_remove) // 2
z0, x0, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
)
# 2. Convolution sequence transformation
hidden_states_B_C = causal_conv1d_update(
hidden_states_B_C,
cache_params.conv_states[self.layer_idx],
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.activation,
)
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
dim=-1,
)
# 3. SSM transformation
A = -torch.exp(self.A_log.float()) # (nheads,)
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
D = self.D[:, None, ...].expand(-1, self.head_dim)
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
hidden_states = selective_state_update(
cache_params.ssm_states[self.layer_idx],
hidden_states_reshaped,
dt,
A,
B,
C,
D,
z=gate.view(batch_size, self.num_heads, self.head_dim) if not self.mamba_rms_norm else None,
dt_bias=dt_bias,
dt_softplus=True,
)
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
if self.mamba_rms_norm:
hidden_states = self.norm(hidden_states, gate)
if d_mlp > 0:
hidden_states = torch.cat([F.silu(z0) * x0, hidden_states], dim=-1)
# 4. Final linear projection
out = self.out_proj(hidden_states[:, None, ...])
# Fused calculations or step by step if no initialized cache is found
else:
A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}
# 2-4. Fused kernel for conv1d, SSM, and the final projection
if self.training and cache_params is None:
out = mamba_split_conv1d_scan_combined(
projected_states,
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.dt_bias,
A,
D=self.D,
chunk_size=self.chunk_size,
seq_idx=None, # was seq_idx
activation=self.activation,
rmsnorm_weight=self.norm.weight if self.mamba_rms_norm else None,
rmsnorm_eps=self.norm.variance_epsilon if self.mamba_rms_norm else None,
outproj_weight=self.out_proj.weight,
outproj_bias=self.out_proj.bias,
headdim=self.head_dim,
ngroups=self.n_groups,
norm_before_gate=False,
return_final_states=False,
**dt_limit_kwargs,
)
else:
d_mlp = (
projected_states.shape[-1]
- 2 * self.intermediate_size
- 2 * self.n_groups * self.ssm_state_size
- self.num_heads
) // 2
if attention_mask is not None:
projected_states = projected_states * attention_mask[..., None]
_, gate, hidden_states_B_C, dt = projected_states.split(
[
2 * d_mlp,
self.intermediate_size,
self.conv_dim,
self.num_heads,
],
dim=-1,
)
if cache_params is not None:
conv_states = F.pad(
hidden_states_B_C.permute(0, 2, 1),
(self.conv_kernel_size - hidden_states_B_C.shape[-2], 0),
)
cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
time_step = nn.functional.softplus(dt + self.dt_bias)
# 1D Convolution
if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
hidden_states_B_C = self.act(
self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len]
) # (B, L, self.d_inner + 2 * ngroups * d_state)
else:
hidden_states_B_C = causal_conv1d_fn(
x=hidden_states_B_C.transpose(1, 2),
weight=self.conv1d.weight.squeeze(1),
bias=self.conv1d.bias,
activation=self.activation,
).transpose(1, 2)[:, :seq_len]
hidden_states, B, C = torch.split(
hidden_states_B_C,
[
self.intermediate_size,
groups_time_state_size,
groups_time_state_size,
],
dim=-1,
)
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
# This is a hack to make sure multi-GPU inference works with HF accelerate
# see: https://github.com/Dao-AILab/flash-attention/issues/523 for more details
with torch.cuda.device(hidden_states.device):
scan_output, ssm_state = mamba_chunk_scan_combined(
hidden_states.view(batch_size, seq_len, -1, self.head_dim),
time_step,
A,
B.view(batch_size, seq_len, self.n_groups, -1),
C.view(batch_size, seq_len, self.n_groups, -1),
chunk_size=self.chunk_size,
D=self.D,
z=None,
seq_idx=None,
return_final_states=True,
**dt_limit_kwargs,
)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = scan_output.view(batch_size, seq_len, -1)
# Multiply "gate" branch and apply extra normalization layer
if self.mamba_rms_norm:
out = self.norm(scan_output, gate)
else:
out = scan_output * torch.nn.functional.silu(gate)
out = self.out_proj(out)
return out
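    # Summary (descriptive comment, not part of the original file): `cuda_kernels_forward` takes one of
    # three paths -- a single-token decode step via `causal_conv1d_update`/`selective_state_update` when a
    # warm cache is available, a fully fused `mamba_split_conv1d_scan_combined` kernel during cache-free
    # training, and otherwise a causal conv1d followed by `mamba_chunk_scan_combined` with cache updates.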
# fmt: off
def torch_forward(
self,
input_states,
cache_params: Optional[FalconHybridMambaAttentionDynamicCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
# 1. Gated MLP's linear projection
input_states = apply_mask_to_padding_states(input_states, attention_mask)
# Add Multipliers
input_states = input_states * self.ssm_in_multiplier
projected_states = self.in_proj(input_states)
projected_states = projected_states * self.mup_vector # ADD Mup Multipliers
gate, hidden_states_B_C, dt = projected_states.split([
self.intermediate_size, self.conv_dim, self.num_heads
], dim=-1)
use_precomputed_states = (
cache_params is not None
and cache_params.has_previous_state
and seq_len == 1
and cache_params.conv_states[self.layer_idx].shape[0]
== cache_params.ssm_states[self.layer_idx].shape[0]
== batch_size
and cache_position is not None
and cache_position[0] > 0
)
# 2. Convolution sequence transformation
if use_precomputed_states:
cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1)
cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device)
# We need to guarantee that anything regarding the cache is on the same device
conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
hidden_states_B_C = torch.sum(
conv_states * self.conv1d.weight.squeeze(1), dim=-1
)
if self.use_conv_bias:
hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
hidden_states_B_C = self.act(hidden_states_B_C)
else:
# Init cache
if cache_params is not None:
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
conv_states = nn.functional.pad(
hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
)
cache_params.conv_states[self.layer_idx].copy_(conv_states)
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
dim=-1
)
# 3. SSM transformation
A = -torch.exp(self.A_log.float()) # [num_heads]
if use_precomputed_states:
# We need to guarantee that anything regarding the cache is on the same device
cache_device = cache_params.ssm_states[self.layer_idx].device
# Note: there is no need to pad parameter matrices here, as there is just one new token
# for batched generation
dt = dt[:, 0, :][:, None, ...]
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
# [num_heads] -> [num_heads, head_dim]
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
# [bsz, num_heads, head_dim, state_size]
dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)
# Discretize B
# [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
# -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
B = B.reshape(batch_size, -1, B.shape[-1])
# [bsz, num_heads, head_dim, state_size]
dB = dt[..., None] * B[..., None, :]
# Discretize x into dB
# [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
dBx = (dB * hidden_states[..., None]).to(device=cache_device)
# State calculation
cache_params.ssm_states[self.layer_idx].copy_(
cache_params.ssm_states[self.layer_idx] * dA + dBx
)
# Subsequent output
# [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
C = C.reshape(batch_size, -1, C.shape[-1])
# [bsz, num_heads, head_dim]
ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n]
# Reshape ssm_states to merge the first two dimensions
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n]
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
y = torch.bmm(ssm_states_reshaped, C_reshaped)
y = y.view(batch_size, self.num_heads, self.head_dim)
# D skip connection
# [num_heads] -> [num_heads, head_dim]
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
y = (y + hidden_states * D).to(y.dtype)
# [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
y = y.reshape(batch_size, -1)[:, None, ...]
else:
# begin ssd naive implementation without einsums
dt = nn.functional.softplus(dt + self.dt_bias)
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
# Discretize x and A
hidden_states = hidden_states * dt[..., None]
A = A.to(hidden_states.dtype) * dt
# Rearrange into blocks/chunks
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
# [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
A = A.permute(0, 3, 1, 2)
A_cumsum = torch.cumsum(A, dim=-1)
# 1. Compute the output for each intra-chunk (diagonal blocks)
# This is the analog of a causal mask
L = torch.exp(segment_sum(A))
# Contraction of C and B to get G (attention-weights like)
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] # shape: (b, c, l, s, h, n)
G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
# Compute M, equivalent to applying attention mask to weights
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/falcon_h1/__init__.py | src/transformers/models/falcon_h1/__init__.py | # Copyright 2025 TII and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_falcon_h1 import *
from .modeling_falcon_h1 import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bartpho/__init__.py | src/transformers/models/bartpho/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .tokenization_bartpho import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/bartpho/tokenization_bartpho.py | src/transformers/models/bartpho/tokenization_bartpho.py | # coding=utf-8
# Copyright 2021 VinAI Research and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
"""Tokenization classes for BARTpho-syllable model."""
import os
from shutil import copyfile
from typing import Any, Optional
from ...tokenization_python import AddedToken
from ...tokenization_utils_sentencepiece import SentencePieceBackend
from ...utils import logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
@requires(backends=("sentencepiece",))
class BartphoTokenizer(SentencePieceBackend):
"""
Adapted from [`XLMRobertaTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file. This vocabulary is the pre-trained SentencePiece model available from the
multilingual XLM-RoBERTa, also used in mBART, consisting of 250K types.
monolingual_vocab_file (`str`):
Path to the monolingual vocabulary file. This monolingual vocabulary consists of Vietnamese-specialized
types extracted from the multilingual vocabulary vocab_file of 250K types.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
is_fast = False
def __init__(
self,
vocab_file,
monolingual_vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sp_model_kwargs: Optional[dict[str, Any]] = None,
**kwargs,
) -> None:
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self.monolingual_vocab_file = monolingual_vocab_file
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
self.fairseq_tokens_to_ids = {}
cnt = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(token) not in self.fairseq_tokens_to_ids:
self.fairseq_tokens_to_ids[str(token)] = cnt
cnt += 1
with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
for line in f:
token = line.strip().split()[0]
self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
if str(mask_token) not in self.fairseq_tokens_to_ids:
self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
# Prepare sp_model_kwargs for parent class
if sp_model_kwargs is not None:
kwargs["sp_model_kwargs"] = sp_model_kwargs
# Call parent init (which will load sp_model)
super().__init__(
vocab_file=vocab_file,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
**kwargs,
)
self._align_added_tokens_with_fairseq_vocab()
def build_inputs_with_special_tokens(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens. A BARTPho sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
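    # Illustrative sketch (token ids 5, 6 and 7 are hypothetical): with cls_token_id=0 and sep_token_id=2,
    # the ids assigned above through `fairseq_tokens_to_ids`, the method yields:
    # >>> tokenizer.build_inputs_with_special_tokens([5, 6])       # -> [0, 5, 6, 2]
    # >>> tokenizer.build_inputs_with_special_tokens([5, 6], [7])  # -> [0, 5, 6, 2, 2, 7, 2]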
def get_special_tokens_mask(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
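    # Illustrative sketch (hypothetical ids): both calls return only zeros, sized to the full
    # sequence once special tokens are counted:
    # >>> tokenizer.create_token_type_ids_from_sequences([5, 6])       # -> [0, 0, 0, 0]
    # >>> tokenizer.create_token_type_ids_from_sequences([5, 6], [7])  # -> [0, 0, 0, 0, 0, 0, 0]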
@property
def vocab_size(self):
"""Override to return fairseq vocab size instead of sp_model vocab size"""
return len(self.fairseq_ids_to_tokens)
def get_vocab(self):
"""Override to use fairseq vocabulary"""
vocab = dict(self.fairseq_tokens_to_ids)
if hasattr(self, "_added_tokens_encoder"):
for token, idx in self._added_tokens_encoder.items():
if token not in vocab:
vocab[token] = idx
return vocab
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the fairseq vocab."""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _convert_token_to_id_with_added_voc(self, token):
"""Override to use fairseq vocab instead of sp_model vocab."""
if token is None:
return None
if token in self._added_tokens_encoder:
return self._added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the fairseq vocab."""
return self.fairseq_ids_to_tokens[index]
def _align_added_tokens_with_fairseq_vocab(self):
"""
The slow tokenizer base class populates `_added_tokens_*` using SentencePiece ids. Remap those entries so that
every token present in the reduced fairseq dictionary uses the same ids everywhere, otherwise conversions and
special-token setters observe two different vocabularies.
"""
if not hasattr(self, "_added_tokens_decoder") or not hasattr(self, "_added_tokens_encoder"):
return
remapped_decoder: dict[int, AddedToken] = {}
for original_id, token_obj in self._added_tokens_decoder.items():
token = token_obj.content
new_id = self.fairseq_tokens_to_ids.get(token, original_id)
remapped_decoder[new_id] = token_obj
self._added_tokens_decoder = remapped_decoder
self._added_tokens_encoder = {token.content: idx for idx, token in remapped_decoder.items()}
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
out_monolingual_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
out_monolingual_vocab_file
) and os.path.isfile(self.monolingual_vocab_file):
copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
elif not os.path.isfile(self.monolingual_vocab_file):
with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(token)} \n")
return out_vocab_file, out_monolingual_vocab_file
__all__ = ["BartphoTokenizer"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mvp/configuration_mvp.py | src/transformers/models/mvp/configuration_mvp.py | # coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MVP model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class MvpConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MvpModel`]. It is used to instantiate a MVP model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the MVP [RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp)
architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50267):
Vocabulary size of the MVP model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MvpModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
use_prompt (`bool`, *optional*, defaults to `False`):
Whether or not to use prompt.
prompt_length (`int`, *optional*, defaults to 100):
The length of prompt.
prompt_mid_dim (`int`, *optional*, defaults to 800):
Dimensionality of the "intermediate" layer in prompt.
Example:
```python
>>> from transformers import MvpConfig, MvpModel
>>> # Initializing a MVP RUCAIBox/mvp style configuration
>>> configuration = MvpConfig()
>>> # Initializing a model (with random weights) from the RUCAIBox/mvp style configuration
>>> model = MvpModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "mvp"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=50267,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
use_cache=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
is_encoder_decoder=True,
decoder_start_token_id=2,
use_prompt=False,
prompt_length=100,
prompt_mid_dim=800,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.use_prompt = use_prompt
self.prompt_length = prompt_length
self.prompt_mid_dim = prompt_mid_dim
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
__all__ = ["MvpConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mvp/__init__.py | src/transformers/models/mvp/__init__.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from ..roberta.tokenization_roberta import RobertaTokenizer as MvpTokenizer
from .configuration_mvp import *
from .modeling_mvp import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/mvp/modeling_mvp.py | src/transformers/models/mvp/modeling_mvp.py | # coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MVP model."""
import math
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import (
_prepare_4d_attention_mask,
_prepare_4d_causal_attention_mask,
)
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
Seq2SeqQuestionAnsweringModelOutput,
Seq2SeqSequenceClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from .configuration_mvp import MvpConfig
logger = logging.get_logger(__name__)
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
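# Illustrative sketch (hypothetical label ids): with pad_token_id=1 and decoder_start_token_id=2,
# >>> labels = torch.tensor([[5, 6, -100, -100]])
# >>> shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
# tensor([[2, 5, 6, 1]])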
# Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->Mvp
class MvpLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# Mvp is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self, input_ids: torch.Tensor, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
):
"""`input_ids' shape is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids.shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
).expand(bsz, -1)
else:
position_ids = position_ids.unsqueeze(0)
return super().forward(position_ids + self.offset)
class MvpAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: Optional[float] = 0.0,
is_decoder: Optional[bool] = False,
bias: Optional[bool] = True,
        layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
attn_prompt: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
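        # Prefix-tuning style prompts: `attn_prompt` stacks a key prompt ([0]) and a value prompt ([1])
        # for this layer. They are expanded over the batch and prepended along the sequence dimension,
        # and the attention mask is extended with zeros so the prompt positions are never masked out.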
if attn_prompt is not None:
key_states = torch.cat([attn_prompt[0].expand(bsz, -1, -1, -1), key_states], dim=2)
value_states = torch.cat([attn_prompt[1].expand(bsz, -1, -1, -1), value_states], dim=2)
if attention_mask is not None:
prompt_mask = torch.zeros(bsz, 1, tgt_len, attn_prompt[0].size(1)).to(attention_mask.device)
attention_mask = torch.cat([prompt_mask, attention_mask], dim=(-1))
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = query_states.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
query_states = query_states.reshape(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
class MvpEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MvpConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MvpAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
self_attn_prompt: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape
`(2, encoder_attention_heads, pro_len, head_dim)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
attn_prompt=self_attn_prompt,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return hidden_states, attn_weights
class MvpDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MvpConfig, layer_idx=None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MvpAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = MvpAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
self_attn_prompt: Optional[torch.Tensor] = None,
cross_attn_prompt: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
self_attn_prompt (`torch.FloatTensor`): prompt of self attention of shape
`(2, decoder_attention_heads, pro_len, head_dim)`.
cross_attn_prompt (`torch.FloatTensor`): prompt of cross attention of shape
`(2, decoder_attention_heads, pro_len, head_dim)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
attn_prompt=self_attn_prompt,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
attn_prompt=cross_attn_prompt,
past_key_values=past_key_values,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->MVP
class MvpClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim: int,
inner_dim: int,
num_classes: int,
pooler_dropout: float,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class MvpPrompt(nn.Module):
"""Layer-wise prompt for encoder or decoder."""
def __init__(self, config, num_layers, num_heads):
super().__init__()
self.prompt_length = config.prompt_length
self.num_layers = num_layers
self.num_heads = num_heads
self.head_dim = config.d_model // num_heads
self.dropout = nn.Dropout(p=config.dropout)
self.prompt_embedding = nn.Embedding(config.prompt_length, config.d_model)
self.prompt_trans = nn.Sequential(
nn.Linear(config.d_model, config.prompt_mid_dim),
nn.GELU(),
nn.Linear(config.prompt_mid_dim, num_layers * 2 * config.d_model),
)
def forward(self, prompt_ids: torch.Tensor) -> tuple[torch.Tensor]:
prompt = self.prompt_trans(self.prompt_embedding(prompt_ids))
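        # `prompt` has shape (prompt_length, num_layers * 2 * d_model) here. The view/permute/split below
        # turns it into num_layers chunks of shape (2, num_heads, prompt_length, head_dim); chunk[0] is the
        # key prompt and chunk[1] the value prompt consumed by MvpAttention as `attn_prompt`.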
prompt = prompt.view(self.prompt_length, self.num_layers * 2, self.num_heads, self.head_dim)
prompt = self.dropout(prompt)
prompt = prompt.permute([1, 2, 0, 3]).split(2)
return prompt
@auto_docstring
class MvpPreTrainedModel(PreTrainedModel):
config: MvpConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, MvpForConditionalGeneration):
init.zeros_(module.final_logits_bias)
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
class MvpEncoder(MvpPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`MvpEncoderLayer`].
Args:
config: MvpConfig
embed_tokens (nn.Embedding): output embedding
use_prompt (bool): whether to use prompt
"""
def __init__(
self, config: MvpConfig, embed_tokens: Optional[nn.Embedding] = None, use_prompt: Optional[bool] = False
):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = MvpLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([MvpEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.use_prompt = use_prompt
if use_prompt:
self.prompt_length = config.prompt_length
self.self_attn_prompt = MvpPrompt(
config,
config.encoder_layers,
config.encoder_attention_heads,
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, BaseModelOutput]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input = input_ids
input_shape = input.shape
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
input = inputs_embeds[:, :, -1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# layer-wise prompt
if self.use_prompt:
prompt_ids = torch.arange(self.prompt_length).to(self.device)
self_attn_prompt = self.self_attn_prompt(prompt_ids)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
self_attn_prompt=(self_attn_prompt[idx] if self.use_prompt else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class MvpDecoder(MvpPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MvpDecoderLayer`]
Args:
config: MvpConfig
embed_tokens (nn.Embedding): output embedding
use_prompt (bool): whether to use prompt
"""
def __init__(self, config: MvpConfig, use_prompt: Optional[bool] = False):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = MvpLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([MvpDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.use_prompt = use_prompt
if use_prompt:
self.prompt_length = config.prompt_length
self.self_attn_prompt = MvpPrompt(
config,
config.decoder_layers,
config.decoder_attention_heads,
)
self.cross_attn_prompt = MvpPrompt(
config,
config.decoder_layers,
config.decoder_attention_heads,
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/marian/convert_marian_to_pytorch.py | src/transformers/models/marian/convert_marian_to_pytorch.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import socket
import time
import warnings
from pathlib import Path
from typing import Union
from zipfile import ZipFile
import numpy as np
import torch
from huggingface_hub.hf_api import list_models
from torch import nn
from tqdm import tqdm
from transformers import MarianConfig, MarianMTModel, MarianTokenizer
def remove_suffix(text: str, suffix: str):
if text.endswith(suffix):
return text[: -len(suffix)]
    return text  # unchanged if the suffix is not present
def remove_prefix(text: str, prefix: str):
if text.startswith(prefix):
return text[len(prefix) :]
    return text  # unchanged if the prefix is not present
def convert_encoder_layer(opus_dict, layer_prefix: str, converter: dict):
sd = {}
for k in opus_dict:
if not k.startswith(layer_prefix):
continue
stripped = remove_prefix(k, layer_prefix)
v = opus_dict[k].T # besides embeddings, everything must be transposed.
sd[converter[stripped]] = torch.tensor(v).squeeze()
return sd
def load_layers_(layer_lst: nn.ModuleList, opus_state: dict, converter, is_decoder=False):
for i, layer in enumerate(layer_lst):
layer_tag = f"decoder_l{i + 1}_" if is_decoder else f"encoder_l{i + 1}_"
sd = convert_encoder_layer(opus_state, layer_tag, converter)
layer.load_state_dict(sd, strict=False)
def find_pretrained_model(src_lang: str, tgt_lang: str) -> list[str]:
"""Find models that can accept src_lang as input and return tgt_lang as output."""
prefix = "Helsinki-NLP/opus-mt-"
model_list = list_models()
model_ids = [x.id for x in model_list if x.id.startswith("Helsinki-NLP")]
src_and_targ = [
remove_prefix(m, prefix).lower().split("-") for m in model_ids if "+" not in m
] # + can't be loaded.
matching = [f"{prefix}{a}-{b}" for (a, b) in src_and_targ if src_lang in a and tgt_lang in b]
return matching
def add_emb_entries(wemb, final_bias, n_special_tokens=1):
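    # Appends one all-zero embedding row per special token (the extra <pad> entry added to the vocab)
    # and a matching zero column to the final logits bias, which this script treats as shape (1, vocab_size).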
vsize, d_model = wemb.shape
embs_to_add = np.zeros((n_special_tokens, d_model))
new_embs = np.concatenate([wemb, embs_to_add])
bias_to_add = np.zeros((n_special_tokens, 1))
new_bias = np.concatenate((final_bias, bias_to_add), axis=1)
return new_embs, new_bias
def _cast_yaml_str(v):
bool_dct = {"true": True, "false": False}
if not isinstance(v, str):
return v
elif v in bool_dct:
return bool_dct[v]
try:
return int(v)
except (TypeError, ValueError):
return v
def cast_marian_config(raw_cfg: dict[str, str]) -> dict:
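    # For instance (hypothetical input), {"enc-depth": "6", "tied-embeddings": "true", "type": "transformer"}
    # becomes {"enc-depth": 6, "tied-embeddings": True, "type": "transformer"}.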
return {k: _cast_yaml_str(v) for k, v in raw_cfg.items()}
CONFIG_KEY = "special:model.yml"
def load_config_from_state_dict(opus_dict):
import yaml
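    # The Marian .npz checkpoint stores its yaml config as an array of character codes under CONFIG_KEY;
    # rebuild the string and drop the last character before handing it to the yaml parser.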
cfg_str = "".join([chr(x) for x in opus_dict[CONFIG_KEY]])
yaml_cfg = yaml.load(cfg_str[:-1], Loader=yaml.BaseLoader)
return cast_marian_config(yaml_cfg)
def find_model_file(dest_dir): # this one better
model_files = list(Path(dest_dir).glob("*.npz"))
if len(model_files) != 1:
raise ValueError(f"Found more than one model file: {model_files}")
model_file = model_files[0]
return model_file
# Group Names Logic: change long opus model names to something shorter, like opus-mt-en-ROMANCE
ROM_GROUP = (
"fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO+es_EC+es_ES+es_GT"
"+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR+pt_PT+gl+lad+an+mwl+it+it_IT+co"
"+nap+scn+vec+sc+ro+la"
)
GROUPS = [
("cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "ZH"),
(ROM_GROUP, "ROMANCE"),
("de+nl+fy+af+da+fo+is+no+nb+nn+sv", "NORTH_EU"),
("da+fo+is+no+nb+nn+sv", "SCANDINAVIA"),
("se+sma+smj+smn+sms", "SAMI"),
("nb_NO+nb+nn_NO+nn+nog+no_nb+no", "NORWAY"),
("ga+cy+br+gd+kw+gv", "CELTIC"), # https://en.wikipedia.org/wiki/Insular_Celtic_languages
]
GROUP_TO_OPUS_NAME = {
"opus-mt-ZH-de": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-de",
"opus-mt-ZH-fi": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi",
"opus-mt-ZH-sv": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-sv",
"opus-mt-SCANDINAVIA-SCANDINAVIA": "da+fo+is+no+nb+nn+sv-da+fo+is+no+nb+nn+sv",
"opus-mt-NORTH_EU-NORTH_EU": "de+nl+fy+af+da+fo+is+no+nb+nn+sv-de+nl+fy+af+da+fo+is+no+nb+nn+sv",
"opus-mt-de-ZH": "de-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
"opus-mt-en_el_es_fi-en_el_es_fi": "en+el+es+fi-en+el+es+fi",
"opus-mt-en-ROMANCE": (
"en-fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO"
"+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR"
"+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la"
),
"opus-mt-en-CELTIC": "en-ga+cy+br+gd+kw+gv",
"opus-mt-es-NORWAY": "es-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
"opus-mt-fi_nb_no_nn_ru_sv_en-SAMI": "fi+nb+no+nn+ru+sv+en-se+sma+smj+smn+sms",
"opus-mt-fi-ZH": "fi-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
"opus-mt-fi-NORWAY": "fi-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
"opus-mt-ROMANCE-en": (
"fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO"
"+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR"
"+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la-en"
),
"opus-mt-CELTIC-en": "ga+cy+br+gd+kw+gv-en",
"opus-mt-sv-ZH": "sv-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
"opus-mt-sv-NORWAY": "sv-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
}
OPUS_GITHUB_URL = "https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/"
ORG_NAME = "Helsinki-NLP/"
def convert_opus_name_to_hf_name(x):
"""For OPUS-MT-Train/ DEPRECATED"""
for substr, grp_name in GROUPS:
x = x.replace(substr, grp_name)
return x.replace("+", "_")
def convert_hf_name_to_opus_name(hf_model_name):
"""
Relies on the assumption that there are no language codes like pt_br in models that are not in GROUP_TO_OPUS_NAME.
"""
hf_model_name = remove_prefix(hf_model_name, ORG_NAME)
if hf_model_name in GROUP_TO_OPUS_NAME:
opus_w_prefix = GROUP_TO_OPUS_NAME[hf_model_name]
else:
opus_w_prefix = hf_model_name.replace("_", "+")
return remove_prefix(opus_w_prefix, "opus-mt-")
def get_system_metadata(repo_root):
import git
return {
"helsinki_git_sha": git.Repo(path=repo_root, search_parent_directories=True).head.object.hexsha,
"transformers_git_sha": git.Repo(path=".", search_parent_directories=True).head.object.hexsha,
"port_machine": socket.gethostname(),
"port_time": time.strftime("%Y-%m-%d-%H:%M"),
}
# docstyle-ignore
FRONT_MATTER_TEMPLATE = """---
language:
{}
tags:
- translation
license: apache-2.0
---
"""
DEFAULT_REPO = "Tatoeba-Challenge"
DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models")
def write_model_card(
hf_model_name: str,
repo_root=DEFAULT_REPO,
save_dir=Path("marian_converted"),
dry_run=False,
extra_metadata={},
) -> tuple[str, dict]:
"""
Copy the most recent model's readme section from opus, and add metadata. upload command: aws s3 sync model_card_dir
s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
"""
import pandas as pd
hf_model_name = remove_prefix(hf_model_name, ORG_NAME)
opus_name: str = convert_hf_name_to_opus_name(hf_model_name)
if repo_root not in ("OPUS-MT-train", "Tatoeba-Challenge"):
raise ValueError(f"Repos root is {repo_root}. Expected either OPUS-MT-train or Tatoeba-Challenge")
opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md")
if not (opus_readme_path.exists()):
raise ValueError(f"Readme file {opus_readme_path} not found")
opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")]
readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md"
s, t = ",".join(opus_src), ",".join(opus_tgt)
metadata = {
"hf_name": hf_model_name,
"source_languages": s,
"target_languages": t,
"opus_readme_url": readme_url,
"original_repo": repo_root,
"tags": ["translation"],
}
metadata.update(extra_metadata)
metadata.update(get_system_metadata(repo_root))
# combine with opus markdown
extra_markdown = (
f"### {hf_model_name}\n\n* source group: {metadata['src_name']} \n* target group: "
f"{metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n"
)
content = opus_readme_path.open().read()
content = content.split("\n# ")[-1] # Get the lowest level 1 header in the README -- the most recent model.
splat = content.split("*")[2:]
print(splat[3])
content = "*".join(splat)
content = (
FRONT_MATTER_TEMPLATE.format(metadata["src_alpha2"])
+ extra_markdown
+ "\n* "
+ content.replace("download", "download original weights")
)
items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()])
sec3 = "\n### System Info: \n" + items
content += sec3
if dry_run:
return content, metadata
sub_dir = save_dir / f"opus-mt-{hf_model_name}"
sub_dir.mkdir(exist_ok=True)
dest = sub_dir / "README.md"
dest.open("w").write(content)
pd.Series(metadata).to_json(sub_dir / "metadata.json")
# if dry_run:
return content, metadata
def make_registry(repo_path="Opus-MT-train/models"):
if not (Path(repo_path) / "fr-en" / "README.md").exists():
raise ValueError(
f"repo_path:{repo_path} does not exist: "
"You must run: git clone git@github.com:Helsinki-NLP/Opus-MT-train.git before calling."
)
results = {}
for p in Path(repo_path).iterdir():
n_dash = p.name.count("-")
if n_dash == 0:
continue
else:
lns = list(open(p / "README.md").readlines())
results[p.name] = _parse_readme(lns)
return [(k, v["pre-processing"], v["download"], v["download"][:-4] + ".test.txt") for k, v in results.items()]
def convert_all_sentencepiece_models(model_list=None, repo_path=None, dest_dir=Path("marian_converted")):
"""Requires 300GB"""
save_dir = Path("marian_ckpt")
dest_dir = Path(dest_dir)
dest_dir.mkdir(exist_ok=True)
save_paths = []
if model_list is None:
model_list: list = make_registry(repo_path=repo_path)
for k, prepro, download, test_set_url in tqdm(model_list):
if "SentencePiece" not in prepro: # dont convert BPE models.
continue
if not os.path.exists(save_dir / k):
download_and_unzip(download, save_dir / k)
pair_name = convert_opus_name_to_hf_name(k)
convert(save_dir / k, dest_dir / f"opus-mt-{pair_name}")
save_paths.append(dest_dir / f"opus-mt-{pair_name}")
return save_paths
def lmap(f, x) -> list:
return list(map(f, x))
def fetch_test_set(test_set_url):
import wget
fname = wget.download(test_set_url, "opus_test.txt")
lns = Path(fname).open().readlines()
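    # The downloaded test file is read in groups of 4 lines: source sentence, gold reference,
    # Marian hypothesis, and a fourth line that is skipped here.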
src = lmap(str.strip, lns[::4])
gold = lmap(str.strip, lns[1::4])
mar_model = lmap(str.strip, lns[2::4])
if not (len(gold) == len(mar_model) == len(src)):
raise ValueError(f"Gold, marian and source lengths {len(gold)}, {len(mar_model)}, {len(src)} mismatched")
os.remove(fname)
return src, mar_model, gold
def convert_whole_dir(path=Path("marian_ckpt/")):
    for subdir in tqdm(list(path.iterdir())):
        dest_dir = Path(f"marian_converted/{subdir.name}")
        if (dest_dir / "pytorch_model.bin").exists():
            continue
        convert(subdir, dest_dir)
def _parse_readme(lns):
"""Get link and metadata from opus model card equivalent."""
subres = {}
for ln in [x.strip() for x in lns]:
if not ln.startswith("*"):
continue
ln = ln[1:].strip()
for k in ["download", "dataset", "models", "model", "pre-processing"]:
if ln.startswith(k):
break
else:
continue
if k in ["dataset", "model", "pre-processing"]:
splat = ln.split(":")
_, v = splat
subres[k] = v
elif k == "download":
v = ln.split("(")[-1][:-1]
subres[k] = v
return subres
def save_tokenizer_config(dest_dir: Path, separate_vocabs=False):
dname = dest_dir.name.split("-")
dct = {"target_lang": dname[-1], "source_lang": "-".join(dname[:-1]), "separate_vocabs": separate_vocabs}
save_json(dct, dest_dir / "tokenizer_config.json")
def add_to_vocab_(vocab: dict[str, int], special_tokens: list[str]):
start = max(vocab.values()) + 1
added = 0
for tok in special_tokens:
if tok in vocab:
continue
vocab[tok] = start + added
added += 1
return added
def find_vocab_file(model_dir):
return list(model_dir.glob("*vocab.yml"))[0]
def find_src_vocab_file(model_dir):
return list(model_dir.glob("*src.vocab.yml"))[0]
def find_tgt_vocab_file(model_dir):
return list(model_dir.glob("*trg.vocab.yml"))[0]
def add_special_tokens_to_vocab(model_dir: Path, separate_vocab=False) -> None:
if separate_vocab:
vocab = load_yaml(find_src_vocab_file(model_dir))
vocab = {k: int(v) for k, v in vocab.items()}
num_added = add_to_vocab_(vocab, ["<pad>"])
save_json(vocab, model_dir / "vocab.json")
vocab = load_yaml(find_tgt_vocab_file(model_dir))
vocab = {k: int(v) for k, v in vocab.items()}
num_added = add_to_vocab_(vocab, ["<pad>"])
save_json(vocab, model_dir / "target_vocab.json")
save_tokenizer_config(model_dir, separate_vocabs=separate_vocab)
else:
vocab = load_yaml(find_vocab_file(model_dir))
vocab = {k: int(v) for k, v in vocab.items()}
num_added = add_to_vocab_(vocab, ["<pad>"])
print(f"added {num_added} tokens to vocab")
save_json(vocab, model_dir / "vocab.json")
save_tokenizer_config(model_dir)
def check_equal(marian_cfg, k1, k2):
v1, v2 = marian_cfg[k1], marian_cfg[k2]
if v1 != v2:
raise ValueError(f"hparams {k1},{k2} differ: {v1} != {v2}")
def check_marian_cfg_assumptions(marian_cfg):
assumed_settings = {
"layer-normalization": False,
"right-left": False,
"transformer-ffn-depth": 2,
"transformer-aan-depth": 2,
"transformer-no-projection": False,
"transformer-postprocess-emb": "d",
"transformer-postprocess": "dan", # Dropout, add, normalize
"transformer-preprocess": "",
"type": "transformer",
"ulr-dim-emb": 0,
"dec-cell-base-depth": 2,
"dec-cell-high-depth": 1,
"transformer-aan-nogate": False,
}
for k, v in assumed_settings.items():
actual = marian_cfg[k]
if actual != v:
raise ValueError(f"Unexpected config value for {k} expected {v} got {actual}")
BIAS_KEY = "decoder_ff_logit_out_b"
BART_CONVERTER = { # for each encoder and decoder layer
"self_Wq": "self_attn.q_proj.weight",
"self_Wk": "self_attn.k_proj.weight",
"self_Wv": "self_attn.v_proj.weight",
"self_Wo": "self_attn.out_proj.weight",
"self_bq": "self_attn.q_proj.bias",
"self_bk": "self_attn.k_proj.bias",
"self_bv": "self_attn.v_proj.bias",
"self_bo": "self_attn.out_proj.bias",
"self_Wo_ln_scale": "self_attn_layer_norm.weight",
"self_Wo_ln_bias": "self_attn_layer_norm.bias",
"ffn_W1": "fc1.weight",
"ffn_b1": "fc1.bias",
"ffn_W2": "fc2.weight",
"ffn_b2": "fc2.bias",
"ffn_ffn_ln_scale": "final_layer_norm.weight",
"ffn_ffn_ln_bias": "final_layer_norm.bias",
# Decoder Cross Attention
"context_Wk": "encoder_attn.k_proj.weight",
"context_Wo": "encoder_attn.out_proj.weight",
"context_Wq": "encoder_attn.q_proj.weight",
"context_Wv": "encoder_attn.v_proj.weight",
"context_bk": "encoder_attn.k_proj.bias",
"context_bo": "encoder_attn.out_proj.bias",
"context_bq": "encoder_attn.q_proj.bias",
"context_bv": "encoder_attn.v_proj.bias",
"context_Wo_ln_scale": "encoder_attn_layer_norm.weight",
"context_Wo_ln_bias": "encoder_attn_layer_norm.bias",
}
class OpusState:
def __init__(self, source_dir, eos_token_id=0):
npz_path = find_model_file(source_dir)
self.state_dict = np.load(npz_path)
cfg = load_config_from_state_dict(self.state_dict)
if cfg["dim-vocabs"][0] != cfg["dim-vocabs"][1]:
            raise ValueError(f"Encoder and decoder vocab sizes differ: {cfg['dim-vocabs']}")
if "Wpos" in self.state_dict:
raise ValueError("Wpos key in state dictionary")
self.state_dict = dict(self.state_dict)
if cfg["tied-embeddings-all"]:
cfg["tied-embeddings-src"] = True
cfg["tied-embeddings"] = True
self.share_encoder_decoder_embeddings = cfg["tied-embeddings-src"]
# create the tokenizer here because we need to know the eos_token_id
self.source_dir = source_dir
self.tokenizer = self.load_tokenizer()
# retrieve EOS token and set correctly
tokenizer_has_eos_token_id = (
hasattr(self.tokenizer, "eos_token_id") and self.tokenizer.eos_token_id is not None
)
eos_token_id = self.tokenizer.eos_token_id if tokenizer_has_eos_token_id else 0
if cfg["tied-embeddings-src"]:
self.wemb, self.final_bias = add_emb_entries(self.state_dict["Wemb"], self.state_dict[BIAS_KEY], 1)
self.pad_token_id = self.wemb.shape[0] - 1
cfg["vocab_size"] = self.pad_token_id + 1
else:
self.wemb, _ = add_emb_entries(self.state_dict["encoder_Wemb"], self.state_dict[BIAS_KEY], 1)
self.dec_wemb, self.final_bias = add_emb_entries(
self.state_dict["decoder_Wemb"], self.state_dict[BIAS_KEY], 1
)
# still assuming that vocab size is same for encoder and decoder
self.pad_token_id = self.wemb.shape[0] - 1
cfg["vocab_size"] = self.pad_token_id + 1
cfg["decoder_vocab_size"] = self.pad_token_id + 1
if cfg["vocab_size"] != self.tokenizer.vocab_size:
raise ValueError(
f"Original vocab size {cfg['vocab_size']} and new vocab size {len(self.tokenizer.encoder)} mismatched."
)
# self.state_dict['Wemb'].sha
self.state_keys = list(self.state_dict.keys())
if "Wtype" in self.state_dict:
raise ValueError("Wtype key in state dictionary")
self._check_layer_entries()
self.cfg = cfg
hidden_size, intermediate_shape = self.state_dict["encoder_l1_ffn_W1"].shape
if hidden_size != cfg["dim-emb"]:
raise ValueError(f"Hidden size {hidden_size} and configured size {cfg['dim_emb']} mismatched")
# Process decoder.yml
decoder_yml = cast_marian_config(load_yaml(source_dir / "decoder.yml"))
check_marian_cfg_assumptions(cfg)
self.hf_config = MarianConfig(
vocab_size=cfg["vocab_size"],
decoder_vocab_size=cfg.get("decoder_vocab_size", cfg["vocab_size"]),
share_encoder_decoder_embeddings=cfg["tied-embeddings-src"],
decoder_layers=cfg["dec-depth"],
encoder_layers=cfg["enc-depth"],
decoder_attention_heads=cfg["transformer-heads"],
encoder_attention_heads=cfg["transformer-heads"],
decoder_ffn_dim=cfg["transformer-dim-ffn"],
encoder_ffn_dim=cfg["transformer-dim-ffn"],
d_model=cfg["dim-emb"],
activation_function=cfg["transformer-ffn-activation"],
pad_token_id=self.pad_token_id,
eos_token_id=eos_token_id,
forced_eos_token_id=eos_token_id,
bos_token_id=0,
max_position_embeddings=cfg["dim-emb"],
scale_embedding=True,
normalize_embedding="n" in cfg["transformer-preprocess"],
static_position_embeddings=not cfg["transformer-train-position-embeddings"],
tie_word_embeddings=cfg["tied-embeddings"],
dropout=0.1, # see opus-mt-train repo/transformer-dropout param.
# default: add_final_layer_norm=False,
num_beams=decoder_yml["beam-size"],
decoder_start_token_id=self.pad_token_id,
bad_words_ids=[[self.pad_token_id]],
max_length=512,
)
def _check_layer_entries(self):
self.encoder_l1 = self.sub_keys("encoder_l1")
self.decoder_l1 = self.sub_keys("decoder_l1")
self.decoder_l2 = self.sub_keys("decoder_l2")
if len(self.encoder_l1) != 16:
warnings.warn(f"Expected 16 keys for each encoder layer, got {len(self.encoder_l1)}")
if len(self.decoder_l1) != 26:
warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}")
if len(self.decoder_l2) != 26:
warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}")
@property
def extra_keys(self):
extra = []
for k in self.state_keys:
if (
k.startswith("encoder_l")
or k.startswith("decoder_l")
or k in [CONFIG_KEY, "Wemb", "encoder_Wemb", "decoder_Wemb", "Wpos", "decoder_ff_logit_out_b"]
):
continue
else:
extra.append(k)
return extra
def sub_keys(self, layer_prefix):
return [remove_prefix(k, layer_prefix) for k in self.state_dict if k.startswith(layer_prefix)]
def load_tokenizer(self):
# save tokenizer
add_special_tokens_to_vocab(self.source_dir, not self.share_encoder_decoder_embeddings)
return MarianTokenizer.from_pretrained(str(self.source_dir))
def load_marian_model(self) -> MarianMTModel:
state_dict, cfg = self.state_dict, self.hf_config
if not cfg.static_position_embeddings:
raise ValueError("config.static_position_embeddings should be True")
model = MarianMTModel(cfg)
if "hidden_size" in cfg.to_dict():
raise ValueError("hidden_size is in config")
load_layers_(
model.model.encoder.layers,
state_dict,
BART_CONVERTER,
)
load_layers_(model.model.decoder.layers, state_dict, BART_CONVERTER, is_decoder=True)
# handle tensors not associated with layers
if self.cfg["tied-embeddings-src"]:
wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
model.model.shared.weight = wemb_tensor
model.model.encoder.embed_tokens = model.model.decoder.embed_tokens = model.model.shared
else:
wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
model.model.encoder.embed_tokens.weight = wemb_tensor
decoder_wemb_tensor = nn.Parameter(torch.FloatTensor(self.dec_wemb))
bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
model.model.decoder.embed_tokens.weight = decoder_wemb_tensor
# handle tied embeddings, otherwise "from_pretrained" loads them incorrectly
if self.cfg["tied-embeddings"]:
model.lm_head.weight.data = model.model.decoder.embed_tokens.weight.data.clone()
model.final_logits_bias = bias_tensor
if "Wpos" in state_dict:
print("Unexpected: got Wpos")
wpos_tensor = torch.tensor(state_dict["Wpos"])
model.model.encoder.embed_positions.weight = wpos_tensor
model.model.decoder.embed_positions.weight = wpos_tensor
if cfg.normalize_embedding:
if "encoder_emb_ln_scale_pre" not in state_dict:
raise ValueError("encoder_emb_ln_scale_pre is not in state dictionary")
raise NotImplementedError("Need to convert layernorm_embedding")
if self.extra_keys:
raise ValueError(f"Failed to convert {self.extra_keys}")
if model.get_input_embeddings().padding_idx != self.pad_token_id:
raise ValueError(
f"Padding tokens {model.get_input_embeddings().padding_idx} and {self.pad_token_id} mismatched"
)
return model
def download_and_unzip(url, dest_dir):
try:
import wget
except ImportError:
raise ImportError("you must pip install wget")
filename = wget.download(url)
unzip(filename, dest_dir)
os.remove(filename)
def convert(source_dir: Path, dest_dir):
dest_dir = Path(dest_dir)
dest_dir.mkdir(exist_ok=True)
opus_state = OpusState(source_dir)
# save tokenizer
opus_state.tokenizer.save_pretrained(dest_dir)
# save_json(opus_state.cfg, dest_dir / "marian_original_config.json")
# ^^ Uncomment to save human readable marian config for debugging
model = opus_state.load_marian_model()
model = model.half()
model.save_pretrained(dest_dir)
model.from_pretrained(dest_dir) # sanity check
def load_yaml(path):
import yaml
with open(path, encoding="utf-8") as f:
return yaml.load(f, Loader=yaml.BaseLoader)
def save_json(content: Union[dict, list], path: str) -> None:
with open(path, "w") as f:
json.dump(content, f)
def unzip(zip_path: str, dest_dir: str) -> None:
with ZipFile(zip_path, "r") as zipObj:
zipObj.extractall(dest_dir)
if __name__ == "__main__":
"""
Tatoeba conversion instructions in scripts/tatoeba/README.md
"""
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src",
type=str,
help="path to marian model sub dir. yaml.load will be used to load the configuration file, please be wary of which file you're loading.",
default="en-de",
)
parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model.")
args = parser.parse_args()
source_dir = Path(args.src)
if not source_dir.exists():
raise ValueError(f"Source directory {source_dir} not found")
dest_dir = f"converted-{source_dir.name}" if args.dest is None else args.dest
convert(source_dir, dest_dir)
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/marian/modeling_marian.py | src/transformers/models/marian/modeling_marian.py | # coding=utf-8
# Copyright 2021 The Marian Team Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MarianMTModel model, ported from the Marian C++ repo."""
import math
from collections.abc import Callable
from typing import Optional, Union
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...masking_utils import create_bidirectional_mask, create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
TransformersKwargs,
auto_docstring,
is_torchdynamo_compiling,
logging,
)
from .configuration_marian import MarianConfig
logger = logging.get_logger(__name__)
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.pad_token_id has to be defined.")
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
class MarianSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
super().__init__(num_positions, embedding_dim, _freeze=True)
def create_weight(self):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = self.weight.shape
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False)
sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
return out
@torch.no_grad()
def forward(
self, input_ids_shape: torch.Size, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(position_ids)
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Marian
class MarianAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[MarianConfig] = None,
layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
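# Illustrative sketch (not part of the original module): the per-head reshape that
# MarianAttention.forward above applies to the query/key/value projections. All sizes below are
# hypothetical example values.
def _sketch_attention_head_reshape():
    import torch
    bsz, tgt_len, embed_dim, num_heads = 2, 5, 8, 2
    head_dim = embed_dim // num_heads
    hidden_states = torch.randn(bsz, tgt_len, embed_dim)
    # (bsz, tgt_len, embed_dim) -> (bsz, num_heads, tgt_len, head_dim)
    per_head = hidden_states.view(bsz, tgt_len, -1, head_dim).transpose(1, 2)
    assert per_head.shape == (bsz, num_heads, tgt_len, head_dim)
    # the attention output is flattened back to (bsz, tgt_len, embed_dim) before out_proj
    merged = per_head.transpose(1, 2).reshape(bsz, tgt_len, -1).contiguous()
    assert merged.shape == hidden_states.shape
    return merged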
# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->Marian, BART->MARIAN
class MarianEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MarianConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MarianAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
layer_idx=layer_idx,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
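# Illustrative sketch (not part of the original module): the float16 overflow guard at the end of
# MarianEncoderLayer.forward above, shown on a tiny made-up tensor.
def _sketch_fp16_overflow_clamp():
    import torch
    hidden_states = torch.tensor([65504.0, -65504.0, 1.0], dtype=torch.float16) * 2  # overflows to inf
    if hidden_states.dtype == torch.float16 and (
        torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
    ):
        clamp_value = torch.finfo(hidden_states.dtype).max - 1000
        hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
    assert torch.isfinite(hidden_states).all()
    return hidden_states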
# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->Marian, BART->MARIAN
class MarianDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MarianConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = MarianAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
is_causal=True,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = MarianAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
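# Illustrative sketch (not part of the original module): why the decoder layer's cross-attention
# keys/values can be computed once and re-used for every generated token -- they depend only on the
# fixed encoder hidden states. Sizes below are hypothetical.
def _sketch_cross_attention_kv_reuse():
    import torch
    from torch import nn
    embed_dim, src_len = 4, 7
    k_proj = nn.Linear(embed_dim, embed_dim)
    encoder_hidden_states = torch.randn(1, src_len, embed_dim)
    first_step_keys = k_proj(encoder_hidden_states)  # computed at the first decoding step
    later_step_keys = k_proj(encoder_hidden_states)  # identical at every later step, hence cached
    assert torch.equal(first_step_keys, later_step_keys)
    return first_step_keys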
@auto_docstring
class MarianPreTrainedModel(PreTrainedModel):
config: MarianConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, MarianSinusoidalPositionalEmbedding):
init.copy_(module.weight, module.create_weight())
elif isinstance(module, MarianMTModel):
init.zeros_(module.final_logits_bias)
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
"decoder_input_ids": input_ids,
}
return dummy_inputs
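# Illustrative sketch (not part of the original module): how `dummy_inputs` above derives the
# attention mask from the pad token. The pad id used here (58100) is only an example value.
def _sketch_dummy_attention_mask():
    import torch
    pad_token = 58100
    input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]])
    attention_mask = input_ids.ne(pad_token)  # True for real tokens, False for padding
    assert attention_mask[1, -1].item() is False
    return {"input_ids": input_ids, "attention_mask": attention_mask}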
class MarianEncoder(MarianPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`MarianEncoderLayer`].
Args:
config: MarianConfig
"""
def __init__(self, config: MarianConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = MarianSinusoidalPositionalEmbedding(
config.max_position_embeddings, embed_dim, self.padding_idx
)
self.layers = nn.ModuleList([MarianEncoderLayer(config) for _ in range(config.encoder_layers)])
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutput]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
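# Illustrative sketch (not part of the original module): the LayerDrop logic used in
# MarianEncoder.forward above -- during training each layer is skipped with probability
# `layerdrop`. The layer count and probability below are hypothetical.
def _sketch_layerdrop(num_layers=6, layerdrop=0.1, training=True):
    import torch
    kept_layers = []
    for idx in range(num_layers):
        to_drop = False
        if training:
            dropout_probability = torch.rand([])
            if dropout_probability < layerdrop:  # skip the layer
                to_drop = True
        if not to_drop:
            kept_layers.append(idx)
    return kept_layers  # indices of layers that actually ran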
class MarianDecoder(MarianPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MarianDecoderLayer`]
Args:
config: MarianConfig
"""
def __init__(self, config: MarianConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.decoder_vocab_size, config.d_model, self.padding_idx)
self.embed_positions = MarianSinusoidalPositionalEmbedding(
config.max_position_embeddings, config.d_model, self.padding_idx
)
self.layers = nn.ModuleList([MarianDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# retrieve input_ids and inputs_embeds
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/marian/tokenization_marian.py | src/transformers/models/marian/tokenization_marian.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from pathlib import Path
from shutil import copyfile
from typing import Any, Optional, Union
import sentencepiece
from ...tokenization_python import PreTrainedTokenizer
from ...utils import logging
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"source_spm": "source.spm",
"target_spm": "target.spm",
"vocab": "vocab.json",
"target_vocab_file": "target_vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
}
SPIECE_UNDERLINE = "▁"
# Example URL https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/vocab.json
@requires(backends=("sentencepiece",))
class MarianTokenizer(PreTrainedTokenizer):
r"""
Construct a Marian tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
source_spm (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary for the source language.
target_spm (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm extension) that
contains the vocabulary for the target language.
source_lang (`str`, *optional*):
A string representing the source language.
target_lang (`str`, *optional*):
A string representing the target language.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
model_max_length (`int`, *optional*, defaults to 512):
The maximum sentence length the model accepts.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<eop>", "<eod>"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Examples:
```python
>>> from transformers import MarianForCausalLM, MarianTokenizer
>>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> src_texts = ["I am a small frog.", "Tom asked his teacher for advice."]
>>> tgt_texts = ["Ich bin ein kleiner Frosch.", "Tom bat seinen Lehrer um Rat."] # optional
>>> inputs = tokenizer(src_texts, text_target=tgt_texts, return_tensors="pt", padding=True)
>>> outputs = model(**inputs) # should work
```"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
source_spm,
target_spm,
vocab,
target_vocab_file=None,
source_lang=None,
target_lang=None,
unk_token="<unk>",
eos_token="</s>",
pad_token="<pad>",
model_max_length=512,
sp_model_kwargs: Optional[dict[str, Any]] = None,
separate_vocabs=False,
**kwargs,
) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
assert Path(source_spm).exists(), f"cannot find spm source {source_spm}"
self.separate_vocabs = separate_vocabs
self.encoder = load_json(vocab)
if str(unk_token) not in self.encoder:
raise KeyError("<unk> token must be in the vocab")
if separate_vocabs:
self.target_encoder = load_json(target_vocab_file)
self.decoder = {v: k for k, v in self.target_encoder.items()}
self.supported_language_codes = []
else:
self.decoder = {v: k for k, v in self.encoder.items()}
self.supported_language_codes: list = [k for k in self.encoder if k.startswith(">>") and k.endswith("<<")]
self.source_lang = source_lang
self.target_lang = target_lang
self.spm_files = [source_spm, target_spm]
# load SentencePiece model for pre-processing
self.spm_source = load_spm(source_spm, self.sp_model_kwargs)
self.spm_target = load_spm(target_spm, self.sp_model_kwargs)
self.current_spm = self.spm_source
self.current_encoder = self.encoder
# Multilingual target side: default to using first supported language code.
self._setup_normalizer()
self._decode_use_source_tokenizer = False
super().__init__(
# bos_token=bos_token, unused. Start decoding with config.decoder_start_token_id
source_lang=source_lang,
target_lang=target_lang,
unk_token=unk_token,
eos_token=eos_token,
pad_token=pad_token,
model_max_length=model_max_length,
sp_model_kwargs=self.sp_model_kwargs,
target_vocab_file=target_vocab_file,
separate_vocabs=separate_vocabs,
**kwargs,
)
def _setup_normalizer(self):
try:
from sacremoses import MosesPunctNormalizer
self.punc_normalizer = MosesPunctNormalizer(self.source_lang).normalize
except (ImportError, FileNotFoundError):
warnings.warn("Recommended: pip install sacremoses.")
self.punc_normalizer = lambda x: x
def normalize(self, x: str) -> str:
"""Cover moses empty string edge case. They return empty list for '' input!"""
return self.punc_normalizer(x) if x else ""
def _convert_token_to_id(self, token):
if token in self.current_encoder:
return self.current_encoder[token]
# The Marian vocab is not aligned with the SentencePiece IDs, so falling back to raw
# SentencePiece indices would map to unrelated tokens. Treat such pieces as unknown.
return self.current_encoder[self.unk_token]
def remove_language_code(self, text: str):
"""Remove language codes like >>fr<< before sentencepiece"""
code = []
if text.startswith(">>") and (end_loc := text.find("<<")) != -1:
code.append(text[: end_loc + 2])
text = text[end_loc + 2 :]
return code, text
def _tokenize(self, text: str) -> list[str]:
code, text = self.remove_language_code(text)
pieces = self.current_spm.encode(text, out_type=str)
return code + pieces
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the decoder."""
if index in self.decoder:
return self.decoder[index]
# Fall back to SPM model for IDs not in external vocab
spm_model = self.spm_source if self._decode_use_source_tokenizer else self.spm_target
piece = spm_model.IdToPiece(index)
return piece if piece else self.unk_token
def batch_decode(self, sequences, **kwargs):
"""
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (`Union[list[int], list[list[int]], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
use_source_tokenizer (`bool`, *optional*, defaults to `False`):
Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence
problems).
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`list[str]`: The list of decoded sentences.
"""
return super().batch_decode(sequences, **kwargs)
def decode(self, token_ids, **kwargs):
"""
        Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, list[int], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
use_source_tokenizer (`bool`, *optional*, defaults to `False`):
Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence
problems).
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
return super().decode(token_ids, **kwargs)
def _decode(
self,
token_ids,
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs,
) -> str:
"""Internal decode method that handles use_source_tokenizer parameter."""
default_use_source = not self.separate_vocabs
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", default_use_source)
return super()._decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
def convert_tokens_to_string(self, tokens: list[str]) -> str:
"""Uses source spm if _decode_use_source_tokenizer is True, and target spm otherwise"""
sp_model = self.spm_source if self._decode_use_source_tokenizer else self.spm_target
current_sub_tokens = []
out_string = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += sp_model.decode_pieces(current_sub_tokens) + token + " "
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += sp_model.decode_pieces(current_sub_tokens)
out_string = out_string.replace(SPIECE_UNDERLINE, " ")
return out_string.strip()
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list[int]:
"""Build model inputs from a sequence by appending eos_token_id."""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_0 + token_ids_1 + [self.eos_token_id]
def _switch_to_input_mode(self):
self.current_spm = self.spm_source
self.current_encoder = self.encoder
def _switch_to_target_mode(self):
self.current_spm = self.spm_target
if self.separate_vocabs:
self.current_encoder = self.target_encoder
@property
def vocab_size(self) -> int:
return len(self.encoder)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
saved_files = []
if self.separate_vocabs:
out_src_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab"],
)
out_tgt_vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["target_vocab_file"],
)
save_json(self.encoder, out_src_vocab_file)
save_json(self.target_encoder, out_tgt_vocab_file)
saved_files.append(out_src_vocab_file)
saved_files.append(out_tgt_vocab_file)
else:
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab"]
)
save_json(self.encoder, out_vocab_file)
saved_files.append(out_vocab_file)
for spm_save_filename, spm_orig_path, spm_model in zip(
[VOCAB_FILES_NAMES["source_spm"], VOCAB_FILES_NAMES["target_spm"]],
self.spm_files,
[self.spm_source, self.spm_target],
):
spm_save_path = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + spm_save_filename
)
if os.path.abspath(spm_orig_path) != os.path.abspath(spm_save_path) and os.path.isfile(spm_orig_path):
copyfile(spm_orig_path, spm_save_path)
saved_files.append(spm_save_path)
elif not os.path.isfile(spm_orig_path):
with open(spm_save_path, "wb") as fi:
content_spiece_model = spm_model.serialized_model_proto()
fi.write(content_spiece_model)
saved_files.append(spm_save_path)
return tuple(saved_files)
def get_vocab(self) -> dict:
return self.get_src_vocab()
def get_src_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def get_tgt_vocab(self):
return dict(self.target_encoder, **self.added_tokens_decoder)
def __getstate__(self) -> dict:
state = self.__dict__.copy()
state.update(
dict.fromkeys(["spm_source", "spm_target", "current_spm", "punc_normalizer", "target_vocab_file"])
)
return state
def __setstate__(self, d: dict) -> None:
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
if not hasattr(self, "_decode_use_source_tokenizer"):
self._decode_use_source_tokenizer = False
self.spm_source, self.spm_target = (load_spm(f, self.sp_model_kwargs) for f in self.spm_files)
self.current_spm = self.spm_source
self._setup_normalizer()
def num_special_tokens_to_add(self, *args, **kwargs):
"""Just EOS"""
return 1
def _special_token_mask(self, seq):
all_special_ids = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(
self, token_ids_0: list, token_ids_1: Optional[list] = None, already_has_special_tokens: bool = False
) -> list[int]:
"""Get list where entries are [1] if a token is [eos] or [pad] else 0."""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0)
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
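# Illustrative sketch (not part of the original module): how the tokenizer above strips a leading
# >>xx<< language token (remove_language_code) and appends the end-of-sequence token when building
# model inputs (build_inputs_with_special_tokens). The token ids below are made up.
def _sketch_language_code_and_eos():
    text = ">>fr<< hello world"
    code = []
    if text.startswith(">>") and (end_loc := text.find("<<")) != -1:
        code.append(text[: end_loc + 2])
        text = text[end_loc + 2 :]
    assert code == [">>fr<<"] and text == " hello world"
    token_ids = [101, 102]  # pretend ids for the tokenized text
    eos_token_id = 0  # hypothetical id for </s>
    return token_ids + [eos_token_id]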
def load_spm(path: str, sp_model_kwargs: dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
spm.Load(path)
return spm
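# Illustrative sketch (not part of the original module): the kind of `sp_model_kwargs` described in
# the class docstring above, enabling subword-regularization sampling. The .spm path is a
# placeholder and must point to a real SentencePiece model file.
def _sketch_sp_model_kwargs(spm_path="source.spm"):
    sp_model_kwargs = {"enable_sampling": True, "nbest_size": -1, "alpha": 0.1}
    spm = load_spm(spm_path, sp_model_kwargs)
    # with sampling enabled, repeated calls may segment the same text differently
    return spm.encode("a toy sentence", out_type=str)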
def save_json(data, path: str) -> None:
with open(path, "w") as f:
json.dump(data, f, indent=2)
def load_json(path: str) -> Union[dict, list]:
with open(path, "r") as f:
return json.load(f)
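# Illustrative sketch (not part of the original module): the encoder/decoder inversion used by the
# tokenizer above, together with the json helpers. The vocabulary and file name are made up.
def _sketch_vocab_round_trip(tmp_path="example_vocab.json"):
    encoder = {"</s>": 0, "<unk>": 1, "▁hello": 42, "<pad>": 58100}
    decoder = {v: k for k, v in encoder.items()}
    assert decoder[42] == "▁hello"
    save_json(encoder, tmp_path)
    assert load_json(tmp_path) == encoder
    return decoder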
__all__ = ["MarianTokenizer"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py | src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import json
import os
import re
from pathlib import Path
import yaml
from tqdm import tqdm
from transformers.models.marian.convert_marian_to_pytorch import (
FRONT_MATTER_TEMPLATE,
convert,
convert_opus_name_to_hf_name,
download_and_unzip,
get_system_metadata,
)
DEFAULT_REPO = "Tatoeba-Challenge"
DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models")
ISO_URL = "https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv"
ISO_PATH = "lang_code_data/iso-639-3.csv"
LANG_CODE_PATH = "lang_code_data/language-codes-3b2.csv"
TATOEBA_MODELS_URL = "https://object.pouta.csc.fi/Tatoeba-MT-models"
class TatoebaConverter:
"""
Convert Tatoeba-Challenge models to huggingface format.
Steps:
1. Convert numpy state dict to hf format (same code as OPUS-MT-Train conversion).
2. Rename opus model to huggingface format. This means replace each alpha3 code with an alpha2 code if a unique
one exists. e.g. aav-eng -> aav-en, heb-eng -> he-en
3. Select the best model for a particular pair, parse the yml for it and write a model card. By default the
best model is the one listed first in released-model-results, but it's also possible to specify the most
recent one.
"""
def __init__(self, save_dir="marian_converted"):
assert Path(DEFAULT_REPO).exists(), "need git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git"
self.download_lang_info()
self.model_results = json.load(open("Tatoeba-Challenge/models/released-model-results.json"))
self.alpha3_to_alpha2 = {}
for line in open(ISO_PATH):
parts = line.split("\t")
if len(parts[0]) == 3 and len(parts[3]) == 2:
self.alpha3_to_alpha2[parts[0]] = parts[3]
        for line in open(LANG_CODE_PATH):  # read the language-codes csv line by line
parts = line.split(",")
if len(parts[0]) == 3 and len(parts[1]) == 2:
self.alpha3_to_alpha2[parts[0]] = parts[1]
self.model_card_dir = Path(save_dir)
self.tag2name = {}
for key, value in GROUP_MEMBERS.items():
self.tag2name[key] = value[0]
def convert_models(self, tatoeba_ids, dry_run=False):
models_to_convert = [self.parse_metadata(x) for x in tatoeba_ids]
save_dir = Path("marian_ckpt")
dest_dir = Path(self.model_card_dir)
dest_dir.mkdir(exist_ok=True)
for model in tqdm(models_to_convert): # k, prepro, download, test_set_url in tqdm(model_list):
if "SentencePiece" not in model["pre-processing"]:
print(f"Skipping {model['release']} because it doesn't appear to use SentencePiece")
continue
if not os.path.exists(save_dir / model["_name"]):
download_and_unzip(f"{TATOEBA_MODELS_URL}/{model['release']}", save_dir / model["_name"])
# from convert_marian_to_pytorch
opus_language_groups_to_hf = convert_opus_name_to_hf_name
pair_name = opus_language_groups_to_hf(model["_name"])
convert(save_dir / model["_name"], dest_dir / f"opus-mt-{pair_name}")
self.write_model_card(model, dry_run=dry_run)
def expand_group_to_two_letter_codes(self, grp_name):
return [self.alpha3_to_alpha2.get(x, x) for x in GROUP_MEMBERS[grp_name][1]]
def is_group(self, code, name):
return "languages" in name or len(GROUP_MEMBERS.get(code, [])) > 1
def get_tags(self, code, name):
if len(code) == 2:
assert "languages" not in name, f"{code}: {name}"
return [code]
elif self.is_group(code, name):
group = self.expand_group_to_two_letter_codes(code)
group.append(code)
return group
else: # zho-> zh
print(f"Three letter monolingual code: {code}")
return [code]
def resolve_lang_code(self, src, tgt) -> tuple[str, str]:
src_tags = self.get_tags(src, self.tag2name[src])
tgt_tags = self.get_tags(tgt, self.tag2name[tgt])
return src_tags, tgt_tags
@staticmethod
def model_type_info_from_model_name(name):
info = {"_has_backtranslated_data": False}
if "1m" in name:
info["_data_per_pair"] = str(1e6)
if "2m" in name:
info["_data_per_pair"] = str(2e6)
if "4m" in name:
info["_data_per_pair"] = str(4e6)
if "+bt" in name:
info["_has_backtranslated_data"] = True
if "tuned4" in name:
info["_tuned"] = re.search(r"tuned4[^-]+", name).group()
return info
def write_model_card(self, model_dict, dry_run=False) -> str:
"""
Construct card from data parsed from YAML and the model's name. upload command: aws s3 sync model_card_dir
s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
"""
model_dir_url = f"{TATOEBA_MODELS_URL}/{model_dict['release']}"
long_pair = model_dict["_name"].split("-")
assert len(long_pair) == 2, f"got a translation pair {model_dict['_name']} that doesn't appear to be a pair"
short_src = self.alpha3_to_alpha2.get(long_pair[0], long_pair[0])
short_tgt = self.alpha3_to_alpha2.get(long_pair[1], long_pair[1])
model_dict["_hf_model_id"] = f"opus-mt-{short_src}-{short_tgt}"
a3_src, a3_tgt = model_dict["_name"].split("-")
# opus_src_tags, opus_tgt_tags = a3_src.split("+"), a3_tgt.split("+")
# This messy part tries to deal with language tags in multilingual models, possibly
# not all having three-letter codes
resolved_src_tags, resolved_tgt_tags = self.resolve_lang_code(a3_src, a3_tgt)
a2_src_tags, a2_tgt_tags = [], []
for tag in resolved_src_tags:
if tag not in self.alpha3_to_alpha2:
a2_src_tags.append(tag)
for tag in resolved_tgt_tags:
if tag not in self.alpha3_to_alpha2:
a2_tgt_tags.append(tag)
lang_tags = dedup(a2_src_tags + a2_tgt_tags)
src_multilingual, tgt_multilingual = (len(a2_src_tags) > 1), (len(a2_tgt_tags) > 1)
s, t = ",".join(a2_src_tags), ",".join(a2_tgt_tags)
metadata = {
"hf_name": model_dict["_name"],
"source_languages": s,
"target_languages": t,
"opus_readme_url": f"{model_dir_url}/README.md",
"original_repo": "Tatoeba-Challenge",
"tags": ["translation"],
"languages": lang_tags,
}
lang_tags = l2front_matter(lang_tags)
metadata["src_constituents"] = list(GROUP_MEMBERS[a3_src][1])
metadata["tgt_constituents"] = list(GROUP_MEMBERS[a3_tgt][1])
metadata["src_multilingual"] = src_multilingual
metadata["tgt_multilingual"] = tgt_multilingual
backtranslated_data = ""
if model_dict["_has_backtranslated_data"]:
backtranslated_data = " with backtranslations"
multilingual_data = ""
if "_data_per_pair" in model_dict:
multilingual_data = f"* data per pair in multilingual model: {model_dict['_data_per_pair']}\n"
tuned = ""
if "_tuned" in model_dict:
tuned = f"* multilingual model tuned for: {model_dict['_tuned']}\n"
model_base_filename = model_dict["release"].split("/")[-1]
download = f"* download original weights: [{model_base_filename}]({model_dir_url}/{model_dict['release']})\n"
langtoken = ""
if tgt_multilingual:
langtoken = (
"* a sentence-initial language token is required in the form of >>id<<"
"(id = valid, usually three-letter target language ID)\n"
)
metadata.update(get_system_metadata(DEFAULT_REPO))
scorestable = ""
for k, v in model_dict.items():
if "scores" in k:
this_score_table = f"* {k}\n|Test set|score|\n|---|---|\n"
pairs = sorted(v.items(), key=lambda x: x[1], reverse=True)
for pair in pairs:
this_score_table += f"|{pair[0]}|{pair[1]}|\n"
scorestable += this_score_table
datainfo = ""
if "training-data" in model_dict:
datainfo += "* Training data: \n"
for k, v in model_dict["training-data"].items():
datainfo += f" * {str(k)}: {str(v)}\n"
if "validation-data" in model_dict:
datainfo += "* Validation data: \n"
for k, v in model_dict["validation-data"].items():
datainfo += f" * {str(k)}: {str(v)}\n"
if "test-data" in model_dict:
datainfo += "* Test data: \n"
for k, v in model_dict["test-data"].items():
datainfo += f" * {str(k)}: {str(v)}\n"
testsetfilename = model_dict["release"].replace(".zip", ".test.txt")
testscoresfilename = model_dict["release"].replace(".zip", ".eval.txt")
testset = f"* test set translations file: [test.txt]({model_dir_url}/{testsetfilename})\n"
testscores = f"* test set scores file: [eval.txt]({model_dir_url}/{testscoresfilename})\n"
# combine with Tatoeba markdown
readme_url = f"{TATOEBA_MODELS_URL}/{model_dict['_name']}/README.md"
extra_markdown = f"""
### {model_dict["_name"]}
* source language name: {self.tag2name[a3_src]}
* target language name: {self.tag2name[a3_tgt]}
* OPUS readme: [README.md]({readme_url})
"""
content = (
f"""
* model: {model_dict["modeltype"]}
* source language code{src_multilingual * "s"}: {", ".join(a2_src_tags)}
* target language code{tgt_multilingual * "s"}: {", ".join(a2_tgt_tags)}
* dataset: opus {backtranslated_data}
* release date: {model_dict["release-date"]}
* pre-processing: {model_dict["pre-processing"]}
"""
+ multilingual_data
+ tuned
+ download
+ langtoken
+ datainfo
+ testset
+ testscores
+ scorestable
)
content = FRONT_MATTER_TEMPLATE.format(lang_tags) + extra_markdown + content
items = "\n".join([f"* {k}: {v}" for k, v in metadata.items()])
sec3 = "\n### System Info: \n" + items
content += sec3
if dry_run:
print("CONTENT:")
print(content)
print("METADATA:")
print(metadata)
return
sub_dir = self.model_card_dir / model_dict["_hf_model_id"]
sub_dir.mkdir(exist_ok=True)
dest = sub_dir / "README.md"
dest.open("w").write(content)
for k, v in metadata.items():
if isinstance(v, datetime.date):
metadata[k] = datetime.datetime.strftime(v, "%Y-%m-%d")
with open(sub_dir / "metadata.json", "w", encoding="utf-8") as writeobj:
json.dump(metadata, writeobj)
def download_lang_info(self):
global LANG_CODE_PATH
Path(LANG_CODE_PATH).parent.mkdir(exist_ok=True)
import wget
from huggingface_hub import hf_hub_download
if not os.path.exists(ISO_PATH):
wget.download(ISO_URL, ISO_PATH)
if not os.path.exists(LANG_CODE_PATH):
LANG_CODE_PATH = hf_hub_download(
repo_id="huggingface/language_codes_marianMT", filename="language-codes-3b2.csv", repo_type="dataset"
)
def parse_metadata(self, model_name, repo_path=DEFAULT_MODEL_DIR, method="best"):
p = Path(repo_path) / model_name
def url_to_name(url):
return url.split("/")[-1].split(".")[0]
if model_name not in self.model_results:
# This is not a language pair, so model results are ambiguous, go by newest
method = "newest"
if method == "best":
# Sort by how early they appear in released-models-results
results = [url_to_name(model["download"]) for model in self.model_results[model_name]]
ymls = [f for f in os.listdir(p) if f.endswith(".yml") and f[:-4] in results]
ymls.sort(key=lambda x: results.index(x[:-4]))
metadata = yaml.safe_load(open(p / ymls[0]))
metadata.update(self.model_type_info_from_model_name(ymls[0][:-4]))
elif method == "newest":
ymls = [f for f in os.listdir(p) if f.endswith(".yml")]
# Sort by date
ymls.sort(
key=lambda x: datetime.datetime.strptime(re.search(r"\d\d\d\d-\d\d?-\d\d?", x).group(), "%Y-%m-%d")
)
metadata = yaml.safe_load(open(p / ymls[-1]))
metadata.update(self.model_type_info_from_model_name(ymls[-1][:-4]))
else:
raise NotImplementedError(f"Don't know argument method='{method}' to parse_metadata()")
metadata["_name"] = model_name
return metadata
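# Illustrative sketch (not part of the original script): what
# TatoebaConverter.model_type_info_from_model_name above extracts from a release name. The release
# name used here is invented for the example.
def _sketch_model_type_info():
    info = TatoebaConverter.model_type_info_from_model_name("opus+bt-2m-tuned4deu_2021-01-01")
    assert info["_has_backtranslated_data"] is True
    assert info["_data_per_pair"] == str(2e6)
    assert info["_tuned"] == "tuned4deu_2021"
    return info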
GROUP_MEMBERS = {
# three letter code -> (group/language name, {constituents...}
# if this language is on the target side the constituents can be used as target language codes.
# if the language is on the source side they are supported natively without special codes.
"aav": ("Austro-Asiatic languages", {"hoc", "hoc_Latn", "kha", "khm", "khm_Latn", "mnw", "vie", "vie_Hani"}),
"afa": (
"Afro-Asiatic languages",
{
"acm",
"afb",
"amh",
"apc",
"ara",
"arq",
"ary",
"arz",
"hau_Latn",
"heb",
"kab",
"mlt",
"rif_Latn",
"shy_Latn",
"som",
"thv",
"tir",
},
),
"afr": ("Afrikaans", {"afr"}),
"alv": (
"Atlantic-Congo languages",
{
"ewe",
"fuc",
"fuv",
"ibo",
"kin",
"lin",
"lug",
"nya",
"run",
"sag",
"sna",
"swh",
"toi_Latn",
"tso",
"umb",
"wol",
"xho",
"yor",
"zul",
},
),
"ara": ("Arabic", {"afb", "apc", "apc_Latn", "ara", "ara_Latn", "arq", "arq_Latn", "arz"}),
"art": (
"Artificial languages",
{
"afh_Latn",
"avk_Latn",
"dws_Latn",
"epo",
"ido",
"ido_Latn",
"ile_Latn",
"ina_Latn",
"jbo",
"jbo_Cyrl",
"jbo_Latn",
"ldn_Latn",
"lfn_Cyrl",
"lfn_Latn",
"nov_Latn",
"qya",
"qya_Latn",
"sjn_Latn",
"tlh_Latn",
"tzl",
"tzl_Latn",
"vol_Latn",
},
),
"aze": ("Azerbaijani", {"aze_Latn"}),
"bat": ("Baltic languages", {"lit", "lav", "prg_Latn", "ltg", "sgs"}),
"bel": ("Belarusian", {"bel", "bel_Latn"}),
"ben": ("Bengali", {"ben"}),
"bnt": (
"Bantu languages",
{"kin", "lin", "lug", "nya", "run", "sna", "swh", "toi_Latn", "tso", "umb", "xho", "zul"},
),
"bul": ("Bulgarian", {"bul", "bul_Latn"}),
"cat": ("Catalan", {"cat"}),
"cau": ("Caucasian languages", {"abk", "kat", "che", "ady"}),
"ccs": ("South Caucasian languages", {"kat"}),
"ceb": ("Cebuano", {"ceb"}),
"cel": ("Celtic languages", {"gla", "gle", "bre", "cor", "glv", "cym"}),
"ces": ("Czech", {"ces"}),
"cpf": ("Creoles and pidgins, French‑based", {"gcf_Latn", "hat", "mfe"}),
"cpp": (
"Creoles and pidgins, Portuguese-based",
{"zsm_Latn", "ind", "pap", "min", "tmw_Latn", "max_Latn", "zlm_Latn"},
),
"cus": ("Cushitic languages", {"som"}),
"dan": ("Danish", {"dan"}),
"deu": ("German", {"deu"}),
"dra": ("Dravidian languages", {"tam", "kan", "mal", "tel"}),
"ell": ("Modern Greek (1453-)", {"ell"}),
"eng": ("English", {"eng"}),
"epo": ("Esperanto", {"epo"}),
"est": ("Estonian", {"est"}),
"euq": ("Basque (family)", {"eus"}),
"eus": ("Basque", {"eus"}),
"fin": ("Finnish", {"fin"}),
"fiu": (
"Finno-Ugrian languages",
{
"est",
"fin",
"fkv_Latn",
"hun",
"izh",
"kpv",
"krl",
"liv_Latn",
"mdf",
"mhr",
"myv",
"sma",
"sme",
"udm",
"vep",
"vro",
},
),
"fra": ("French", {"fra"}),
"gem": (
"Germanic languages",
{
"afr",
"ang_Latn",
"dan",
"deu",
"eng",
"enm_Latn",
"fao",
"frr",
"fry",
"gos",
"got_Goth",
"gsw",
"isl",
"ksh",
"ltz",
"nds",
"nld",
"nno",
"nob",
"nob_Hebr",
"non_Latn",
"pdc",
"sco",
"stq",
"swe",
"swg",
"yid",
},
),
"gle": ("Irish", {"gle"}),
"glg": ("Galician", {"glg"}),
"gmq": ("North Germanic languages", {"dan", "nob", "nob_Hebr", "swe", "isl", "nno", "non_Latn", "fao"}),
"gmw": (
"West Germanic languages",
{
"afr",
"ang_Latn",
"deu",
"eng",
"enm_Latn",
"frr",
"fry",
"gos",
"gsw",
"ksh",
"ltz",
"nds",
"nld",
"pdc",
"sco",
"stq",
"swg",
"yid",
},
),
"grk": ("Greek languages", {"grc_Grek", "ell"}),
"hbs": ("Serbo-Croatian", {"hrv", "srp_Cyrl", "bos_Latn", "srp_Latn"}),
"heb": ("Hebrew", {"heb"}),
"hin": ("Hindi", {"hin"}),
"hun": ("Hungarian", {"hun"}),
"hye": ("Armenian", {"hye", "hye_Latn"}),
"iir": (
"Indo-Iranian languages",
{
"asm",
"awa",
"ben",
"bho",
"gom",
"guj",
"hif_Latn",
"hin",
"jdt_Cyrl",
"kur_Arab",
"kur_Latn",
"mai",
"mar",
"npi",
"ori",
"oss",
"pan_Guru",
"pes",
"pes_Latn",
"pes_Thaa",
"pnb",
"pus",
"rom",
"san_Deva",
"sin",
"snd_Arab",
"tgk_Cyrl",
"tly_Latn",
"urd",
"zza",
},
),
"ilo": ("Iloko", {"ilo"}),
"inc": (
"Indic languages",
{
"asm",
"awa",
"ben",
"bho",
"gom",
"guj",
"hif_Latn",
"hin",
"mai",
"mar",
"npi",
"ori",
"pan_Guru",
"pnb",
"rom",
"san_Deva",
"sin",
"snd_Arab",
"urd",
},
),
"ine": (
"Indo-European languages",
{
"afr",
"afr_Arab",
"aln",
"ang_Latn",
"arg",
"asm",
"ast",
"awa",
"bel",
"bel_Latn",
"ben",
"bho",
"bjn",
"bos_Latn",
"bre",
"bul",
"bul_Latn",
"cat",
"ces",
"cor",
"cos",
"csb_Latn",
"cym",
"dan",
"deu",
"dsb",
"egl",
"ell",
"eng",
"enm_Latn",
"ext",
"fao",
"fra",
"frm_Latn",
"frr",
"fry",
"gcf_Latn",
"gla",
"gle",
"glg",
"glv",
"gom",
"gos",
"got_Goth",
"grc_Grek",
"gsw",
"guj",
"hat",
"hif_Latn",
"hin",
"hrv",
"hsb",
"hye",
"hye_Latn",
"ind",
"isl",
"ita",
"jdt_Cyrl",
"ksh",
"kur_Arab",
"kur_Latn",
"lad",
"lad_Latn",
"lat_Grek",
"lat_Latn",
"lav",
"lij",
"lit",
"lld_Latn",
"lmo",
"ltg",
"ltz",
"mai",
"mar",
"max_Latn",
"mfe",
"min",
"mkd",
"mwl",
"nds",
"nld",
"nno",
"nob",
"nob_Hebr",
"non_Latn",
"npi",
"oci",
"ori",
"orv_Cyrl",
"oss",
"pan_Guru",
"pap",
"pcd",
"pdc",
"pes",
"pes_Latn",
"pes_Thaa",
"pms",
"pnb",
"pol",
"por",
"prg_Latn",
"pus",
"roh",
"rom",
"ron",
"rue",
"rus",
"rus_Latn",
"san_Deva",
"scn",
"sco",
"sgs",
"sin",
"slv",
"snd_Arab",
"spa",
"sqi",
"srd",
"srp_Cyrl",
"srp_Latn",
"stq",
"swe",
"swg",
"tgk_Cyrl",
"tly_Latn",
"tmw_Latn",
"ukr",
"urd",
"vec",
"wln",
"yid",
"zlm_Latn",
"zsm_Latn",
"zza",
},
),
"isl": ("Icelandic", {"isl"}),
"ita": ("Italian", {"ita"}),
"itc": (
"Italic languages",
{
"arg",
"ast",
"bjn",
"cat",
"cos",
"egl",
"ext",
"fra",
"frm_Latn",
"gcf_Latn",
"glg",
"hat",
"ind",
"ita",
"lad",
"lad_Latn",
"lat_Grek",
"lat_Latn",
"lij",
"lld_Latn",
"lmo",
"max_Latn",
"mfe",
"min",
"mwl",
"oci",
"pap",
"pcd",
"pms",
"por",
"roh",
"ron",
"scn",
"spa",
"srd",
"tmw_Latn",
"vec",
"wln",
"zlm_Latn",
"zsm_Latn",
},
),
"jpn": ("Japanese", {"jpn", "jpn_Bopo", "jpn_Hang", "jpn_Hani", "jpn_Hira", "jpn_Kana", "jpn_Latn", "jpn_Yiii"}),
"jpx": ("Japanese (family)", {"jpn"}),
"kat": ("Georgian", {"kat"}),
"kor": ("Korean", {"kor_Hani", "kor_Hang", "kor_Latn", "kor"}),
"lav": ("Latvian", {"lav"}),
"lit": ("Lithuanian", {"lit"}),
"mkd": ("Macedonian", {"mkd"}),
"mkh": ("Mon-Khmer languages", {"vie_Hani", "mnw", "vie", "kha", "khm_Latn", "khm"}),
"msa": ("Malay (macrolanguage)", {"zsm_Latn", "ind", "max_Latn", "zlm_Latn", "min"}),
"mul": (
"Multiple languages",
{
"abk",
"acm",
"ady",
"afb",
"afh_Latn",
"afr",
"akl_Latn",
"aln",
"amh",
"ang_Latn",
"apc",
"ara",
"arg",
"arq",
"ary",
"arz",
"asm",
"ast",
"avk_Latn",
"awa",
"aze_Latn",
"bak",
"bam_Latn",
"bel",
"bel_Latn",
"ben",
"bho",
"bod",
"bos_Latn",
"bre",
"brx",
"brx_Latn",
"bul",
"bul_Latn",
"cat",
"ceb",
"ces",
"cha",
"che",
"chr",
"chv",
"cjy_Hans",
"cjy_Hant",
"cmn",
"cmn_Hans",
"cmn_Hant",
"cor",
"cos",
"crh",
"crh_Latn",
"csb_Latn",
"cym",
"dan",
"deu",
"dsb",
"dtp",
"dws_Latn",
"egl",
"ell",
"enm_Latn",
"epo",
"est",
"eus",
"ewe",
"ext",
"fao",
"fij",
"fin",
"fkv_Latn",
"fra",
"frm_Latn",
"frr",
"fry",
"fuc",
"fuv",
"gan",
"gcf_Latn",
"gil",
"gla",
"gle",
"glg",
"glv",
"gom",
"gos",
"got_Goth",
"grc_Grek",
"grn",
"gsw",
"guj",
"hat",
"hau_Latn",
"haw",
"heb",
"hif_Latn",
"hil",
"hin",
"hnj_Latn",
"hoc",
"hoc_Latn",
"hrv",
"hsb",
"hun",
"hye",
"iba",
"ibo",
"ido",
"ido_Latn",
"ike_Latn",
"ile_Latn",
"ilo",
"ina_Latn",
"ind",
"isl",
"ita",
"izh",
"jav",
"jav_Java",
"jbo",
"jbo_Cyrl",
"jbo_Latn",
"jdt_Cyrl",
"jpn",
"kab",
"kal",
"kan",
"kat",
"kaz_Cyrl",
"kaz_Latn",
"kek_Latn",
"kha",
"khm",
"khm_Latn",
"kin",
"kir_Cyrl",
"kjh",
"kpv",
"krl",
"ksh",
"kum",
"kur_Arab",
"kur_Latn",
"lad",
"lad_Latn",
"lao",
"lat_Latn",
"lav",
"ldn_Latn",
"lfn_Cyrl",
"lfn_Latn",
"lij",
"lin",
"lit",
"liv_Latn",
"lkt",
"lld_Latn",
"lmo",
"ltg",
"ltz",
"lug",
"lzh",
"lzh_Hans",
"mad",
"mah",
"mai",
"mal",
"mar",
"max_Latn",
"mdf",
"mfe",
"mhr",
"mic",
"min",
"mkd",
"mlg",
"mlt",
"mnw",
"moh",
"mon",
"mri",
"mwl",
"mww",
"mya",
"myv",
"nan",
"nau",
"nav",
"nds",
"niu",
"nld",
"nno",
"nob",
"nob_Hebr",
"nog",
"non_Latn",
"nov_Latn",
"npi",
"nya",
"oci",
"ori",
"orv_Cyrl",
"oss",
"ota_Arab",
"ota_Latn",
"pag",
"pan_Guru",
"pap",
"pau",
"pdc",
"pes",
"pes_Latn",
"pes_Thaa",
"pms",
"pnb",
"pol",
"por",
"ppl_Latn",
"prg_Latn",
"pus",
"quc",
"qya",
"qya_Latn",
"rap",
"rif_Latn",
"roh",
"rom",
"ron",
"rue",
"run",
"rus",
"sag",
"sah",
"san_Deva",
"scn",
"sco",
"sgs",
"shs_Latn",
"shy_Latn",
"sin",
"sjn_Latn",
"slv",
"sma",
"sme",
"smo",
"sna",
"snd_Arab",
"som",
"spa",
"sqi",
"srp_Cyrl",
"srp_Latn",
"stq",
"sun",
"swe",
"swg",
"swh",
"tah",
"tam",
"tat",
"tat_Arab",
"tat_Latn",
"tel",
"tet",
"tgk_Cyrl",
"tha",
"tir",
"tlh_Latn",
"tly_Latn",
"tmw_Latn",
"toi_Latn",
"ton",
"tpw_Latn",
"tso",
"tuk",
"tuk_Latn",
"tur",
"tvl",
"tyv",
"tzl",
"tzl_Latn",
"udm",
"uig_Arab",
"uig_Cyrl",
"ukr",
"umb",
"urd",
"uzb_Cyrl",
"uzb_Latn",
"vec",
"vie",
"vie_Hani",
"vol_Latn",
"vro",
"war",
"wln",
"wol",
"wuu",
"xal",
"xho",
"yid",
"yor",
"yue",
"yue_Hans",
"yue_Hant",
"zho",
"zho_Hans",
"zho_Hant",
"zlm_Latn",
"zsm_Latn",
"zul",
"zza",
},
),
"nic": (
"Niger-Kordofanian languages",
{
"bam_Latn",
"ewe",
"fuc",
"fuv",
"ibo",
"kin",
"lin",
"lug",
"nya",
"run",
"sag",
"sna",
"swh",
"toi_Latn",
"tso",
"umb",
"wol",
"xho",
"yor",
"zul",
},
),
"nld": ("Dutch", {"nld"}),
"nor": ("Norwegian", {"nob", "nno"}),
"phi": ("Philippine languages", {"ilo", "akl_Latn", "war", "hil", "pag", "ceb"}),
"pol": ("Polish", {"pol"}),
"por": ("Portuguese", {"por"}),
"pqe": (
"Eastern Malayo-Polynesian languages",
{"fij", "gil", "haw", "mah", "mri", "nau", "niu", "rap", "smo", "tah", "ton", "tvl"},
),
"roa": (
"Romance languages",
{
"arg",
"ast",
"cat",
"cos",
"egl",
"ext",
"fra",
"frm_Latn",
"gcf_Latn",
"glg",
"hat",
"ind",
"ita",
"lad",
"lad_Latn",
"lij",
"lld_Latn",
"lmo",
"max_Latn",
"mfe",
"min",
"mwl",
"oci",
"pap",
"pms",
"por",
"roh",
"ron",
"scn",
"spa",
"tmw_Latn",
"vec",
"wln",
"zlm_Latn",
"zsm_Latn",
},
),
"ron": ("Romanian", {"ron"}),
"run": ("Rundi", {"run"}),
"rus": ("Russian", {"rus"}),
"sal": ("Salishan languages", {"shs_Latn"}),
"sem": ("Semitic languages", {"acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "heb", "mlt", "tir"}),
"sla": (
"Slavic languages",
{
"bel",
"bel_Latn",
"bos_Latn",
"bul",
"bul_Latn",
"ces",
"csb_Latn",
"dsb",
"hrv",
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | true |
huggingface/transformers | https://github.com/huggingface/transformers/blob/a7f29523361b2cc12e51c1f5133d95f122f6f45c/src/transformers/models/marian/configuration_marian.py | src/transformers/models/marian/configuration_marian.py | # coding=utf-8
# Copyright 2021 The Marian Team Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Marian model configuration"""
from ...configuration_utils import PreTrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class MarianConfig(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MarianModel`]. It is used to instantiate an
Marian model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Marian
[Helsinki-NLP/opus-mt-en-de](https://huggingface.co/Helsinki-NLP/opus-mt-en-de) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 58101):
Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`MarianModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/value attentions (not used by all models).
forced_eos_token_id (`int`, *optional*, defaults to 0):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Examples:
```python
>>> from transformers import MarianModel, MarianConfig
>>> # Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration
>>> configuration = MarianConfig()
>>> # Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration
>>> model = MarianModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "marian"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=58101,
decoder_vocab_size=None,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=58100,
scale_embedding=False,
pad_token_id=58100,
eos_token_id=0,
forced_eos_token_id=0,
share_encoder_decoder_embeddings=True,
**kwargs,
):
self.vocab_size = vocab_size
self.decoder_vocab_size = decoder_vocab_size or vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
kwargs["tie_word_embeddings"] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
forced_eos_token_id=forced_eos_token_id,
**kwargs,
)
__all__ = ["MarianConfig"]
| python | Apache-2.0 | a7f29523361b2cc12e51c1f5133d95f122f6f45c | 2026-01-04T14:38:15.407064Z | false |
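Two details of the `MarianConfig.__init__` above are easy to miss: `decoder_vocab_size` falls back to `vocab_size` when left unset, and `share_encoder_decoder_embeddings` is forwarded to the generic `tie_word_embeddings` flag consumed by `PreTrainedConfig`. The following minimal sketch illustrates both; it assumes the `transformers` package is installed and uses only the public `MarianConfig` constructor.

```python
# Minimal usage sketch for the MarianConfig defined above; assumes the
# transformers package is installed so the class can be imported.
from transformers import MarianConfig

# Default: encoder and decoder share one 58101-token vocabulary and tie embeddings.
default_cfg = MarianConfig()
assert default_cfg.decoder_vocab_size == default_cfg.vocab_size == 58101
assert default_cfg.tie_word_embeddings is True

# Separate decoder vocabulary with untied embeddings: share_encoder_decoder_embeddings
# is propagated to the generic tie_word_embeddings flag in __init__.
split_cfg = MarianConfig(
    vocab_size=32000,
    decoder_vocab_size=24000,
    share_encoder_decoder_embeddings=False,
)
assert split_cfg.decoder_vocab_size == 24000
assert split_cfg.tie_word_embeddings is False
```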