# NOTE(review): the lines that preceded the module docstring here were a dataset-viewer
# schema artifact ("text stringlengths 5 631k / id stringlengths 14 178 / metadata dict /
# __index_level_0__ int64 0 647") — not part of the source module; removed.
""" EVA EVA ViT from https://github.com/baaivision/EVA , paper: https://arxiv.org/abs/2211.07636 @article{EVA, title={EVA: Exploring the Limits of Masked Visual Representation Learning at Scale}, author={Fang, Yuxin and Wang, Wen and Xie, Binhui and Sun, Quan and Wu, Ledell and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2211.07636}, year={2022} } EVA-02: A Visual Representation for Neon Genesis - https://arxiv.org/abs/2303.11331 @article{EVA02, title={EVA-02: A Visual Representation for Neon Genesis}, author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2303.11331}, year={2023} } @article{bolya2025perception, title={Perception encoder: The best visual embeddings are not at the output of the network}, author={Bolya, Daniel and Huang, Po-Yao and Sun, Peize and Cho, Jang Hyun and Madotto, Andrea and Wei, Chen and Ma, Tengyu and Zhi, Jiale and Rajasegaran, Jathushan and Rasheed, Hanoona and others}, journal={arXiv preprint arXiv:2504.13181}, year={2025} } @inproceedings{heo2024rotary, title={Rotary position embedding for vision transformer}, author={Heo, Byeongho and Park, Song and Han, Dongyoon and Yun, Sangdoo}, booktitle={European Conference on Computer Vision}, pages={289--305}, year={2024}, organization={Springer} } This file contains a number of ViT variants the utilise ROPE position embeddings, SwiGLU and other additions: * EVA & EVA02 model implementations that evolved from BEiT, additional models in vision_transformer.py. 
* `timm` original SBB ViT w/ ROPE position embeddings * Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181) * ROPE-ViT from Naver AI (https://arxiv.org/abs/2403.13298) Modifications by / Copyright 2023 Ross Wightman, original copyrights below """ # EVA models Copyright (c) 2022 BAAI-Vision # EVA02 models Copyright (c) 2023 BAAI-Vision import math import os from functools import partial from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import ( PatchEmbed, Mlp, GluMlp, SwiGLU, LayerNorm, DropPath, PatchDropoutWithIndices, RotaryEmbeddingCat, RotaryEmbeddingMixed, apply_rot_embed_cat, apply_keep_indices_nlc, trunc_normal_, resample_patch_embed, resample_abs_pos_embed, global_pool_nlc, to_2tuple, use_fused_attn, AttentionRope, AttentionPoolLatent, ) from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint from ._registry import generate_default_cfgs, register_model __all__ = ['Eva'] class EvaAttention(nn.Module): """ EVA Attention with ROPE, no k-bias, and fused/unfused qkv options """ fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, num_heads: int = 8, qkv_bias: bool = True, qkv_fused: bool = True, qkv_bias_separate: bool = False, num_prefix_tokens: int = 1, attn_drop: float = 0., proj_drop: float = 0., attn_head_dim: Optional[int] = None, norm_layer: Optional[Callable] = None, qk_norm: bool = False, scale_norm: bool = True, ): """ Args: dim: Input dimension of the token embeddings num_heads: Number of attention heads qkv_bias: Whether to add a bias term to the query, key, and value projections qkv_fused: Whether qkv projections are fused into one projection or separate qkv_bias_separate: Whether to apply bias to qkv as a separate addition or part of F.linear() 
call num_prefix_tokens: Number of reg/cls tokens at the beginning of the sequence that should not have position embeddings applied attn_drop: Dropout rate for attention weights proj_drop: Dropout rate for the output projection attn_head_dim: Dimension of each attention head (if None, computed as dim // num_heads) norm_layer: Normalization layer constructor to use for QK and scale normalization qk_norm: Enable normalization of query (Q) and key (K) vectors with norm_layer scale_norm: Enable normalization (scaling) of attention output with norm_layer """ super().__init__() if scale_norm or qk_norm: assert norm_layer is not None, 'norm_layer must be provided if qk_norm or scale_norm is True' self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim attn_dim = head_dim * self.num_heads self.scale = head_dim ** -0.5 self.num_prefix_tokens = num_prefix_tokens self.fused_attn = use_fused_attn() self.qkv_bias_separate = qkv_bias_separate if qkv_fused: self.qkv = nn.Linear(dim, attn_dim * 3, bias=False) self.q_proj = self.k_proj = self.v_proj = None if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(attn_dim)) self.register_buffer('k_bias', torch.zeros(attn_dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(attn_dim)) else: self.q_bias = self.k_bias = self.v_bias = None else: self.q_proj = nn.Linear(dim, attn_dim, bias=qkv_bias) self.k_proj = nn.Linear(dim, attn_dim, bias=False) self.v_proj = nn.Linear(dim, attn_dim, bias=qkv_bias) self.qkv = None self.q_bias = self.k_bias = self.v_bias = None self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.norm = norm_layer(attn_dim) if scale_norm else nn.Identity() self.proj = nn.Linear(attn_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward( self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None, ): 
"""Forward pass for the attention module. Args: x: Input tensor of shape (batch_size, sequence_length, embedding_dim) rope: Rotary position embeddings tensor for position-aware attention attn_mask: Optional attention mask to apply during attention computation Returns: Tensor of shape (batch_size, sequence_length, embedding_dim) """ B, N, C = x.shape if self.qkv is not None: if self.q_bias is None: qkv = self.qkv(x) else: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.qkv_bias_separate: qkv = self.qkv(x) qkv += qkv_bias else: qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim else: q = self.q_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) # B, num_heads, N, C k = self.k_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) v = self.v_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) q, k = self.q_norm(q), self.k_norm(k) if rope is not None: npt = self.num_prefix_tokens q = torch.cat([q[:, :, :npt, :], apply_rot_embed_cat(q[:, :, npt:, :], rope)], dim=2).type_as(v) k = torch.cat([k[:, :, :npt, :], apply_rot_embed_cat(k[:, :, npt:, :], rope)], dim=2).type_as(v) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = (q @ k.transpose(-2, -1)) if attn_mask is not None: attn_mask = attn_mask.to(torch.bool) attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf")) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.norm(x) x = self.proj(x) x = self.proj_drop(x) return x class EvaBlock(nn.Module): def __init__( self, dim: int, num_heads: int, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, num_prefix_tokens: 
            int = 1,
            attn_type: str = 'eva',
            proj_drop: float = 0.,
            attn_drop: float = 0.,
            drop_path: float = 0.,
            init_values: Optional[float] = None,
            act_layer: Callable = nn.GELU,
            norm_layer: Callable = LayerNorm,
            attn_head_dim: Optional[int] = None,
            **kwargs,
    ):
        """ Initialize the EVA transformer block.

        Args:
            dim: Input dimension of the token embeddings
            num_heads: Number of attention heads
            qkv_bias: Whether to use bias terms in query, key, value projections
            qkv_fused: Whether to use a single projection for query, key, value
            mlp_ratio: Ratio of MLP hidden dimension to input dimension
            swiglu_mlp: Whether to use SwiGLU activation in the MLP
            scale_mlp: Whether to use normalization in the MLP
            scale_attn_inner: Whether to use normalization within the attention mechanism
            num_prefix_tokens: Number of tokens at the beginning of the sequence (class tokens, etc.)
            attn_type: Type of attention module to use ('eva' or 'rope')
            proj_drop: Dropout rate for projection layers
            attn_drop: Dropout rate for attention matrix
            drop_path: Stochastic depth rate
            init_values: Initial value for LayerScale, None = no LayerScale
            act_layer: Activation layer constructor
            norm_layer: Normalization layer constructor
            attn_head_dim: Dimension of each attention head (if None, computed as dim // num_heads)
        """
        super().__init__()
        self.norm1 = norm_layer(dim)
        attn_cls = AttentionRope if attn_type == 'rope' else EvaAttention
        self.attn = attn_cls(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qkv_fused=qkv_fused,
            num_prefix_tokens=num_prefix_tokens,
            attn_drop=attn_drop,
            proj_drop=proj_drop,
            attn_head_dim=attn_head_dim,
            norm_layer=norm_layer,
            scale_norm=scale_attn_inner,
        )
        # LayerScale (gamma_1/gamma_2) only active when init_values given
        self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim)
        hidden_features = int(dim * mlp_ratio)
        if swiglu_mlp:
            if scale_mlp:
                # when norm in SwiGLU used, an impl with separate fc for gate & x is used
                self.mlp = SwiGLU(
                    in_features=dim,
                    hidden_features=hidden_features,
                    norm_layer=norm_layer if scale_mlp else None,
                    drop=proj_drop,
                )
            else:
                # w/o any extra norm, an impl with packed weights is used, matches existing GluMLP
                self.mlp = GluMlp(
                    in_features=dim,
                    hidden_features=hidden_features * 2,
                    norm_layer=norm_layer if scale_mlp else None,
                    act_layer=nn.SiLU,
                    gate_last=False,
                    drop=proj_drop,
                )
        else:
            self.mlp = Mlp(
                in_features=dim,
                hidden_features=hidden_features,
                act_layer=act_layer,
                norm_layer=norm_layer if scale_mlp else None,
                drop=proj_drop,
            )
        self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(
            self,
            x: torch.Tensor,
            rope: Optional[torch.Tensor] = None,
            attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if self.gamma_1 is None:
            x = x + self.drop_path1(self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask))
            x = x + self.drop_path2(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask))
            x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x)))
        return x


class EvaBlockPostNorm(nn.Module):
    """ EVA block w/ post-norm and support for swiglu, MLP norm scale, ROPE.
""" def __init__( self, dim: int, num_heads: int, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., attn_type: str = 'eva', swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, num_prefix_tokens: int = 1, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., init_values: Optional[float] = None, # ignore for post-norm act_layer: Callable = nn.GELU, norm_layer: Callable = nn.LayerNorm, attn_head_dim: Optional[int] = None, ): """ Initialize the post-norm EVA transformer block. Args: dim: Input dimension of the token embeddings num_heads: Number of attention heads qkv_bias: Whether to use bias terms in query, key, value projections qkv_fused: Whether to use a single projection for query, key, value mlp_ratio: Ratio of MLP hidden dimension to input dimension swiglu_mlp: Whether to use SwiGLU activation in the MLP scale_mlp: Whether to use normalization in the MLP scale_attn_inner: Whether to use normalization within the attention mechanism num_prefix_tokens: Number of tokens at the beginning of the sequence (class tokens, etc.) attn_type: Type of attention module to use ('eva' or 'rope') proj_drop: Dropout rate for projection layers attn_drop: Dropout rate for attention matrix drop_path: Stochastic depth rate init_values: Initial value for LayerScale, None = no LayerScale (NOTE: ignored for post-norm block) act_layer: Activation layer constructor norm_layer: Normalization layer constructor attn_head_dim: Dimension of each attention head (if None, computed as dim // num_heads) """ super().__init__() attn_cls = AttentionRope if attn_type == 'rope' else EvaAttention self.attn = attn_cls( dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, num_prefix_tokens=num_prefix_tokens, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer, scale_norm=scale_attn_inner, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: # when norm in SwiGLU used, an impl with separate fc for gate & x is used self.mlp = SwiGLU( in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) else: # w/o any extra norm, an impl with packed fc1 weights is used, matches existing GluMLP self.mlp = GluMlp( in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop, ) else: self.mlp = Mlp( in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward( self, x: torch.Tensor, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: x = x + self.drop_path1(self.norm1(self.attn(x, rope=rope, attn_mask=attn_mask))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class Eva(nn.Module): """ Eva Vision Transformer w/ Abs & Rotary Pos Embed This class implements the EVA and EVA02 models that were based on the BEiT ViT variant * EVA - abs pos embed, global avg pool * EVA02 - abs + rope pos embed, global avg pool, SwiGLU, scale Norm in MLP (ala normformer) """ def __init__( self, img_size: Union[int, Tuple[int, int]] = 224, patch_size: Union[int, Tuple[int, int]] = 16, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 768, depth: int = 12, num_heads: int = 12, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, attn_type: str = 'eva', drop_rate: float = 0., pos_drop_rate: float = 0., patch_drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., norm_layer: Callable = 
            LayerNorm,
            init_values: Optional[float] = None,
            class_token: bool = True,
            num_reg_tokens: int = 0,
            no_embed_class: bool = False,
            use_abs_pos_emb: bool = True,
            use_rot_pos_emb: bool = False,
            rope_mixed_mode: bool = False,
            rope_grid_offset: float = 0.,
            rope_grid_indexing: str = 'ij',
            rope_temperature: float = 10000.,
            use_post_norm: bool = False,
            use_pre_transformer_norm: bool = False,
            use_post_transformer_norm: Optional[bool] = None,
            use_fc_norm: Optional[bool] = None,
            attn_pool_num_heads: Optional[int] = None,
            attn_pool_mlp_ratio: Optional[float] = None,
            dynamic_img_size: bool = False,
            dynamic_img_pad: bool = False,
            ref_feat_shape: Optional[Union[Tuple[int, int], int]] = None,
            head_init_scale: float = 0.001,
    ):
        """Initialize the EVA Vision Transformer model.

        Args:
            img_size: Input image size (single int for square, or tuple for rectangular)
            patch_size: Patch size to divide image into tokens (single int for square, or tuple)
            in_chans: Number of input image channels
            num_classes: Number of classes (output dim) for classification head (final projection), 0 for pass-through
            global_pool: Type of global pooling for final sequence ('avg', 'token', 'map', etc.)
            embed_dim: Embedding dimension for tokens
            depth: Number of transformer blocks
            num_heads: Number of attention heads
            qkv_bias: Enable bias for query, key, value projections
            qkv_fused: Use a single projection for query, key, value
            mlp_ratio: Ratio of mlp hidden dim to embedding dim
            swiglu_mlp: Use SwiGLU activation in MLP
            scale_mlp: Apply scaling normalization in MLP (normformer style)
            scale_attn_inner: Apply scaling normalization inside attention
            attn_type: Type of attention module to use
            drop_rate: Dropout rate after final projection and pooling
            pos_drop_rate: Dropout rate for positional embeddings
            patch_drop_rate: Rate of dropping patches during training
            proj_drop_rate: Dropout rate for projections
            attn_drop_rate: Dropout rate for attention
            drop_path_rate: Stochastic depth rate
            norm_layer: Normalization layer constructor
            init_values: Initial layer-scale values
            class_token: Use class token
            num_reg_tokens: Number of additional learnable 'register' tokens to add to the sequence
            no_embed_class: Don't include position embeddings for class (or reg) tokens
            use_abs_pos_emb: Use absolute (learned) positional embeddings
            use_rot_pos_emb: Use rotary position embeddings
            rope_mixed_mode: Use mixed mode ROPE with per-layer learnable frequencies
            rope_grid_offset: Offset for rotary position embedding grid
            rope_grid_indexing: Indexing mode for rotary position embeddings ('ij' or 'xy')
            rope_temperature: Temperature parameter for ROPE frequency computation
            use_post_norm: Use post-norm transformer block type
            use_pre_transformer_norm: Use normalization layer before transformer blocks
            use_post_transformer_norm: Use normalization layer after transformer blocks
            use_fc_norm: Use normalization layer after pooling, before final classifier
            attn_pool_num_heads: Number of heads in attention pooling
            attn_pool_mlp_ratio: MLP ratio in attention pooling
            dynamic_img_size: Support dynamic image sizes in forward pass
            dynamic_img_pad: Apply dynamic padding for irregular image sizes
            ref_feat_shape:
                Reference feature shape for rotary position embedding scale
            head_init_scale: Initialization scale for classification head weights
        """
        super().__init__()
        assert global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map')
        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = self.head_hidden_size = self.embed_dim = embed_dim  # for consistency with other models
        self.num_prefix_tokens = (1 if class_token else 0) + num_reg_tokens
        self.no_embed_class = no_embed_class
        self.dynamic_img_size = dynamic_img_size
        self.grad_checkpointing = False

        # resolve norm / pool usage
        activate_pre_norm = use_pre_transformer_norm
        if use_fc_norm is not None:
            activate_fc_norm = use_fc_norm  # pass through if explicit
        else:
            activate_fc_norm = global_pool == 'avg'  # default on if avg pool used
        if use_post_transformer_norm is not None:
            activate_post_norm = use_post_transformer_norm  # pass through if explicit
        else:
            activate_post_norm = not activate_fc_norm  # default on if fc_norm isn't active

        embed_args = {}
        if dynamic_img_size:
            # flatten deferred until after pos embed
            embed_args.update(dict(strict_img_size=False, output_fmt='NHWC'))
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            dynamic_img_pad=dynamic_img_pad,
            bias=not use_pre_transformer_norm,
            **embed_args,
        )
        num_patches = self.patch_embed.num_patches
        r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
        self.reg_token = nn.Parameter(torch.zeros(1, num_reg_tokens, embed_dim)) if num_reg_tokens else None
        self.cls_embed = class_token and self.reg_token is None

        num_pos_tokens = num_patches if no_embed_class else num_patches + self.num_prefix_tokens
        self.pos_embed = nn.Parameter(torch.zeros(1, num_pos_tokens, embed_dim)) if use_abs_pos_emb else None
        self.pos_drop = nn.Dropout(p=pos_drop_rate)
        if patch_drop_rate > 0:
            self.patch_drop = PatchDropoutWithIndices(patch_drop_rate, num_prefix_tokens=self.num_prefix_tokens)
        else:
            self.patch_drop = None

        if use_rot_pos_emb:
            ref_feat_shape = to_2tuple(ref_feat_shape) if ref_feat_shape is not None else None
            if rope_mixed_mode:
                self.rope_mixed = True
                # Mixed mode supports depth-dependent frequencies
                self.rope = RotaryEmbeddingMixed(
                    dim=embed_dim,
                    depth=depth,
                    num_heads=num_heads,
                    temperature=rope_temperature,
                    feat_shape=None if dynamic_img_size else self.patch_embed.grid_size,
                    grid_indexing=rope_grid_indexing,
                )
            else:
                self.rope_mixed = False
                self.rope = RotaryEmbeddingCat(
                    dim=embed_dim // num_heads,
                    temperature=rope_temperature,
                    in_pixels=False,
                    feat_shape=None if dynamic_img_size else self.patch_embed.grid_size,
                    ref_feat_shape=ref_feat_shape,
                    grid_offset=rope_grid_offset,
                    grid_indexing=rope_grid_indexing,
                )
        else:
            self.rope_mixed = False
            self.rope = None

        self.norm_pre = norm_layer(embed_dim) if activate_pre_norm else nn.Identity()

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        block_fn = EvaBlockPostNorm if use_post_norm else EvaBlock
        self.blocks = nn.ModuleList([
            block_fn(
                dim=embed_dim,
                num_heads=num_heads,
                qkv_bias=qkv_bias,
                qkv_fused=qkv_fused,
                mlp_ratio=mlp_ratio,
                swiglu_mlp=swiglu_mlp,
                scale_mlp=scale_mlp,
                scale_attn_inner=scale_attn_inner,
                attn_type=attn_type,
                num_prefix_tokens=self.num_prefix_tokens,
                proj_drop=proj_drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                init_values=init_values,
            )
            for i in range(depth)])
        self.feature_info = [
            dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)]

        self.norm = norm_layer(embed_dim) if activate_post_norm else nn.Identity()

        if global_pool == 'map':
            self.attn_pool = AttentionPoolLatent(
                self.embed_dim,
                num_heads=attn_pool_num_heads or num_heads,
                mlp_ratio=attn_pool_mlp_ratio or mlp_ratio,
                norm_layer=norm_layer,
                act_layer=nn.GELU,
            )
        else:
            self.attn_pool = None
        self.fc_norm = norm_layer(embed_dim) if activate_fc_norm else nn.Identity()
        self.head_drop = nn.Dropout(drop_rate)
        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=.02)
        if self.cls_token is not None:
            trunc_normal_(self.cls_token, std=.02)
        if self.reg_token is not None:
            trunc_normal_(self.reg_token, std=.02)

        self.fix_init_weight()
        if isinstance(self.head, nn.Linear):
            trunc_normal_(self.head.weight, std=.02)
            self.head.weight.data.mul_(head_init_scale)
            self.head.bias.data.mul_(head_init_scale)

    def fix_init_weight(self) -> None:
        """Fix initialization weights by rescaling based on layer depth."""
        def rescale(param, layer_id):
            param.div_(math.sqrt(2.0 * layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def _init_weights(self, m: nn.Module) -> None:
        """Initialize weights for Linear layers.

        Args:
            m: Module to initialize.
""" if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def no_weight_decay(self) -> Set[str]: """Parameters to exclude from weight decay.""" nwd = {'pos_embed', 'cls_token'} if (rope := getattr(self, "rope", None)) and hasattr(rope, "no_weight_decay"): return nwd | {f"rope.{p}" for p in rope.no_weight_decay()} return nwd @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: """Enable or disable gradient checkpointing.""" self.grad_checkpointing = enable @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict[str, Any]: """Create layer groupings for optimization.""" matcher = dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))], ) return matcher @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None: """Reset the classifier head. Args: num_classes: Number of output classes. global_pool: Global pooling type. """ self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def set_input_size( self, img_size: Optional[Tuple[int, int]] = None, patch_size: Optional[Tuple[int, int]] = None, ) -> None: """Update the input image resolution and patch size. Args: img_size: New input resolution, if None current resolution is used. patch_size: New patch size, if None existing patch size is used. 
""" prev_grid_size = self.patch_embed.grid_size self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size) if self.pos_embed is not None: num_prefix_tokens = 0 if self.no_embed_class else self.num_prefix_tokens num_new_tokens = self.patch_embed.num_patches + num_prefix_tokens if num_new_tokens != self.pos_embed.shape[1]: self.pos_embed = nn.Parameter(resample_abs_pos_embed( self.pos_embed, new_size=self.patch_embed.grid_size, old_size=prev_grid_size, num_prefix_tokens=num_prefix_tokens, verbose=True, )) if self.rope is not None: self.rope.update_feat_shape(self.patch_embed.grid_size) def _pos_embed(self, x) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if self.dynamic_img_size: B, H, W, C = x.shape if self.pos_embed is not None: prev_grid_size = self.patch_embed.grid_size pos_embed = resample_abs_pos_embed( self.pos_embed, new_size=(H, W), old_size=prev_grid_size, num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens, ) else: pos_embed = None x = x.view(B, -1, C) rot_pos_embed = self.rope.get_embed(shape=(H, W)) if self.rope is not None else None else: pos_embed = self.pos_embed rot_pos_embed = self.rope.get_embed() if self.rope is not None else None to_cat = [] if self.cls_token is not None: to_cat.append(self.cls_token.expand(x.shape[0], -1, -1)) if self.reg_token is not None: to_cat.append(self.reg_token.expand(x.shape[0], -1, -1)) if self.no_embed_class: # position embedding does not overlap with class / reg token if pos_embed is not None: x = x + pos_embed if to_cat: x = torch.cat(to_cat + [x], dim=1) else: # pos_embed has entry for class / reg token, concat then add if to_cat: x = torch.cat(to_cat + [x], dim=1) if pos_embed is not None: x = x + pos_embed x = self.pos_drop(x) # apply patch dropout to patches and rotary position embedding if self.patch_drop is not None: x, keep_indices = self.patch_drop(x) if rot_pos_embed is not None and keep_indices is not None: rot_pos_embed = apply_keep_indices_nlc(x, rot_pos_embed, 
keep_indices) # After applying keep indices to rope embeds, batch dim is added if getattr(self, 'rope_mixed', False): # B, D, nH, N, dim -> D, B, nH, N, dim. For consistent iteration over depth at index 0. rot_pos_embed = rot_pos_embed.transpose(0, 1) else: # B, N, dim -> B, 1, N, dim. Need head dim singleton for correct dim alignment in axial mode. rot_pos_embed = rot_pos_embed.unsqueeze(1) return x, rot_pos_embed def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, return_prefix_tokens: bool = False, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if an int, if is a sequence, select by matching indices return_prefix_tokens: Return both prefix and spatial intermediate tokens norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features """ assert output_fmt in ('NCHW', 'NLC'), 'Output format for EVA-ViT features must be one of NCHW or NLC.' 
reshape = output_fmt == 'NCHW' intermediates = [] take_indices, max_index = feature_take_indices(len(self.blocks), indices) # forward pass B, _, height, width = x.shape x = self.patch_embed(x) x, rot_pos_embed = self._pos_embed(x) x = self.norm_pre(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] # Handle depth-dependent embeddings for mixed mode if getattr(self, 'rope_mixed', False) and rot_pos_embed is not None: for i, blk in enumerate(blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, rope=rot_pos_embed[i]) else: x = blk(x, rope=rot_pos_embed[i]) if i in take_indices: intermediates.append(self.norm(x) if norm else x) else: for i, blk in enumerate(blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, rope=rot_pos_embed) else: x = blk(x, rope=rot_pos_embed) if i in take_indices: intermediates.append(self.norm(x) if norm else x) # process intermediates if self.num_prefix_tokens: # split prefix (e.g. class, distill) and spatial feature tokens prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates] intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates] if reshape: # reshape to BCHW output format H, W = self.patch_embed.dynamic_feat_size((height, width)) intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates] if not torch.jit.is_scripting() and return_prefix_tokens: # return_prefix not support in torchscript due to poor type handling intermediates = list(zip(intermediates, prefix_tokens)) if intermediates_only: return intermediates x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_norm: self.norm = nn.Identity() if prune_head: self.attn_pool = None self.fc_norm = nn.Identity() self.reset_classifier(0, '') return take_indices def pool(self, x: torch.Tensor, pool_type: Optional[str] = None) -> torch.Tensor: if self.attn_pool is not None: x = self.attn_pool(x) return x pool_type = self.global_pool if pool_type is None else pool_type x = global_pool_nlc(x, pool_type=pool_type, num_prefix_tokens=self.num_prefix_tokens) return x def forward_features(self, x: torch.Tensor) -> torch.Tensor: """Forward pass through feature extraction layers. Args: x: Input tensor. Returns: Feature tensor. """ x = self.patch_embed(x) x, rot_pos_embed = self._pos_embed(x) x = self.norm_pre(x) if getattr(self, 'rope_mixed', False) and rot_pos_embed is not None: # Handle depth-dependent embeddings for mixed mode # pos embed has shape (depth, num_heads, H*W, dim) or (depth, batch_size, num_heads, H*W, dim) for i, blk in enumerate(self.blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, rope=rot_pos_embed[i]) else: x = blk(x, rope=rot_pos_embed[i]) else: # Standard path for non-mixed mode for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, rope=rot_pos_embed) else: x = blk(x, rope=rot_pos_embed) x = self.norm(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: """Forward pass through classifier head. Args: x: Feature tensor. pre_logits: Return pre-logits if True. Returns: Output tensor. """ x = self.pool(x) x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x: Input tensor. Returns: Output tensor. 
""" x = self.forward_features(x) x = self.forward_head(x) return x def _convert_pe( state_dict: Dict[str, torch.Tensor], model: nn.Module, prefix: str = 'visual.', ) -> Dict[str, torch.Tensor]: """Convert Perception Encoder weights. Args: state_dict: State dictionary to convert. model: Target model instance. prefix: Prefix to strip from keys. Returns: Converted state dictionary. """ state_dict = state_dict.get('model', state_dict) state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()} out_dict = {} swaps = [ ('conv1', 'patch_embed.proj'), ('positional_embedding', 'pos_embed'), ('transformer.resblocks.', 'blocks.'), ('ln_pre', 'norm_pre'), ('ln_post', 'norm'), ('ln_', 'norm'), ('ls_1.gamma', 'gamma_1'), ('ls_2.gamma', 'gamma_2'), ('in_proj_', 'qkv.'), ('out_proj', 'proj'), ('mlp.c_fc', 'mlp.fc1'), ('mlp.c_proj', 'mlp.fc2'), ] len_prefix = len(prefix) for k, v in state_dict.items(): if prefix: if not k.startswith(prefix): continue k = k[len_prefix:] for sp in swaps: k = k.replace(sp[0], sp[1]) if k.startswith('attn_pool'): k = k.replace('attn_pool.attn', 'attn_pool') k = k.replace('attn_pool.layernorm', 'attn_pool.norm') k = k.replace('attn_pool.probe', 'attn_pool.latent') if k.startswith('attn_pool.qkv'): dim = v.shape[0] // 3 if k.endswith('weight'): out_dict['attn_pool.q.weight'] = v[:dim] out_dict['attn_pool.kv.weight'] = v[dim:] elif k.endswith('bias'): out_dict['attn_pool.q.bias'] = v[:dim] out_dict['attn_pool.kv.bias'] = v[dim:] continue elif k == 'proj': k = 'head.weight' v = v.transpose(0, 1) out_dict['head.bias'] = torch.zeros(v.shape[0]) elif k == 'class_embedding': k = 'cls_token' v = v.unsqueeze(0).unsqueeze(1) elif k == 'pos_embed': v = v.unsqueeze(0) out_dict[k] = v return out_dict def checkpoint_filter_fn( state_dict: Dict[str, torch.Tensor], model: nn.Module, interpolation: str = 'bicubic', antialias: bool = True, ) -> Dict[str, torch.Tensor]: """Convert patch embedding weight from manual patchify + linear proj to conv. 
Args: state_dict: Checkpoint state dictionary. model: Target model instance. interpolation: Interpolation method for resizing. antialias: Whether to use antialiasing when resizing. Returns: Filtered state dictionary. """ out_dict = {} # Standard EVA checkpoint processing state_dict = state_dict.get('model_ema', state_dict) state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('module', state_dict) state_dict = state_dict.get('state_dict', state_dict) # Loading Meta PE (Perception Encoder) weights if 'visual.conv1.weight' in state_dict: return _convert_pe(state_dict, model) elif 'conv1.weight' in state_dict: return _convert_pe(state_dict, model, prefix='') # prefix for loading OpenCLIP compatible weights if 'visual.trunk.pos_embed' in state_dict: prefix = 'visual.trunk.' elif 'visual.pos_embed' in state_dict: prefix = 'visual.' else: prefix = '' mim_weights = prefix + 'mask_token' in state_dict no_qkv = prefix + 'blocks.0.attn.q_proj.weight' in state_dict len_prefix = len(prefix) for k, v in state_dict.items(): if prefix: if not k.startswith(prefix): continue k = k[len_prefix:] if 'rope' in k and not k == 'rope.freqs': # fixed embedding no need to load buffer from checkpoint continue if 'patch_embed.proj.weight' in k: _, _, H, W = model.patch_embed.proj.weight.shape if v.shape[-1] != W or v.shape[-2] != H: v = resample_patch_embed( v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True, ) elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: # To resize pos embedding when using model at different size from pretrained weights num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) v = resample_abs_pos_embed( v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) k = k.replace('mlp.ffn_ln', 'mlp.norm') k = k.replace('attn.inner_attn_ln', 'attn.norm') k = k.replace('mlp.w12', 
'mlp.fc1') k = k.replace('mlp.w1', 'mlp.fc1_g') k = k.replace('mlp.w2', 'mlp.fc1_x') k = k.replace('mlp.w3', 'mlp.fc2') if no_qkv: k = k.replace('q_bias', 'q_proj.bias') k = k.replace('v_bias', 'v_proj.bias') if mim_weights and k in ('mask_token', 'lm_head.weight', 'lm_head.bias', 'norm.weight', 'norm.bias'): if k == 'norm.weight' or k == 'norm.bias': # try moving norm -> fc norm on fine-tune, probably a better starting point than new init k = k.replace('norm', 'fc_norm') else: # skip pretrain mask token & head weights continue out_dict[k] = v return out_dict def _create_eva(variant: str, pretrained: bool = False, **kwargs) -> Eva: """Create an EVA model. Args: variant: Model variant name. pretrained: Load pretrained weights. **kwargs: Additional model arguments. Returns: Instantiated Eva model. """ # Check if we should use NaFlexVit implementation use_naflex = kwargs.pop('use_naflex', None) _USE_NAFLEX_DEFAULT = os.environ.get('TIMM_USE_NAFLEX', '0') == '1' if use_naflex is None: use_naflex = _USE_NAFLEX_DEFAULT if use_naflex: # Import here to avoid circular imports from .naflexvit import _create_naflexvit_from_eva return _create_naflexvit_from_eva(variant, pretrained, **kwargs) out_indices = kwargs.pop('out_indices', 3) model = build_model_with_cfg( Eva, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) return model def _cfg(url: str = '', **kwargs) -> Dict[str, Any]: """Generate default configuration for EVA models. Args: url: Model weights URL. **kwargs: Additional configuration parameters. Returns: Model configuration dictionary. 
""" return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'license': 'mit', **kwargs } def _pe_cfg(url: str = '', **kwargs) -> Dict[str, Any]: """Generate default configuration for Perception Encoder models. Args: url: Model weights URL. **kwargs: Additional configuration parameters. Returns: Model configuration dictionary. """ return { 'url': url, 'num_classes': 0, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'license': 'custom', **kwargs } default_cfgs = generate_default_cfgs({ # EVA 01 CLIP fine-tuned on imagenet-1k 'eva_giant_patch14_224.clip_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz224_ftcls_89p1.pt', hf_hub_id='timm/', ), 'eva_giant_patch14_336.clip_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz336_ftcls_89p4.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), # MIM EVA 01 pretrain, ft on in22k -> in1k 'eva_giant_patch14_336.m30m_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_336px_psz14_ema_89p6.pt', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_giant_patch14_560.m30m_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_560px_psz14_ema_89p7.pt', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 560, 560), crop_pct=1.0, crop_mode='squash'), # in22k or m38m MIM pretrain w/ intermediate in22k fine-tune and final in1k fine-tune 'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( # 
hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_B_pt_in21k_medft_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), 'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_in21k_medft_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), 'eva02_large_patch14_448.mim_m38m_ft_in22k_in1k': _cfg( hf_hub_id='timm/', #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_m38m_medft_in21k_ft_in1k_p14.pt', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), # in22k or m3m MIM pretrain w/ in1k fine-tune 'eva02_tiny_patch14_336.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_Ti_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, ), 'eva02_small_patch14_336.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_S_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, ), 'eva02_base_patch14_448.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_B_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), 'eva02_large_patch14_448.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), 'eva02_large_patch14_448.mim_m38m_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_m38m_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), # in22k or m3m MIM pretrain w/ in22k fine-tune 'eva02_base_patch14_448.mim_in22k_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_B_pt_in21k_medft_in21k_p14.pt', 
hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), 'eva02_large_patch14_448.mim_in22k_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_in21k_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), 'eva02_large_patch14_448.mim_m38m_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_m38m_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), # in22k or m38m MIM pretrain 'eva02_tiny_patch14_224.mim_in22k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_Ti_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_small_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_S_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_base_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_B_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_large_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_large_patch14_224.mim_m38m': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_m38m_p14.pt', hf_hub_id='timm/', num_classes=0, ), # EVA01 and EVA02 CLIP image towers 'eva_giant_patch14_clip_224.laion400m': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', # hf_hub_id='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=1024, ), 'eva_giant_patch14_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', # hf_hub_id='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k', # float16 
weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=1024, ), 'eva02_base_patch16_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', # hf_hub_id='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=512, ), 'eva02_large_patch14_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', # hf_hub_id='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=768, ), 'eva02_large_patch14_clip_336.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', # hf_hub_id='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, num_classes=768, ), 'eva02_enormous_patch14_clip_224.laion2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', # hf_hub_id='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k', # float16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=1024, ), 'eva02_enormous_patch14_clip_224.laion2b_plus': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', # hf_hub_id='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k', # bfloat16 weights # hf_hub_filename='open_clip_pytorch_model.bin', hf_hub_id='timm/', num_classes=1024, ), 'eva02_enormous_patch14_clip_224.pretrain': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_E_psz14.pt', num_classes=0, ), 'vit_medium_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) ), 
'vit_mediumd_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) ), 'vit_betwixt_patch16_rope_reg4_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, ), 'vit_base_patch16_rope_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5) ), # Perception Encoder weights 'vit_pe_core_tiny_patch16_384.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Core-T16-384', #hf_hub_filename='PE-Core-T16-384.pt', input_size=(3, 384, 384), num_classes=512, # output proj dim ), 'vit_pe_core_small_patch16_384.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Core-S16-384', #hf_hub_filename='PE-Core-S16-384.pt', input_size=(3, 384, 384), num_classes=512, # output proj dim ), 'vit_pe_core_base_patch16_224.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Core-B16-224', #hf_hub_filename='PE-Core-B16-224.pt', input_size=(3, 224, 224), num_classes=1024, # output proj dim ), 'vit_pe_core_large_patch14_336.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Core-L14-336', #hf_hub_filename='PE-Core-L14-336.pt', input_size=(3, 336, 336), num_classes=1024, # output proj dim ), 'vit_pe_core_gigantic_patch14_448.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Core-G14-448', #hf_hub_filename='PE-Core-G14-448.pt', input_size=(3, 448, 448), num_classes=1280, # output proj dim ), 'vit_pe_lang_large_patch14_448.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Lang-L14-448', #hf_hub_filename='PE-Lang-L14-448.pt', input_size=(3, 448, 448), num_classes=0, ), 'vit_pe_lang_large_patch14_448.fb_tiling': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Lang-L14-448-Tiling', #hf_hub_filename='PE-Lang-L14-448-Tiling.pt', input_size=(3, 448, 448), num_classes=0, ), 'vit_pe_lang_gigantic_patch14_448.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Lang-G14-448', 
#hf_hub_filename='PE-Lang-G14-448.pt', input_size=(3, 448, 448), num_classes=0, ), 'vit_pe_lang_gigantic_patch14_448.fb_tiling': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Lang-G14-448-Tiling', #hf_hub_filename='PE-Lang-G14-448-Tiling.pt', input_size=(3, 448, 448), num_classes=0, ), 'vit_pe_spatial_tiny_patch16_512.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Spatial-T16-512', #hf_hub_filename='PE-Spatial-T16-512.pt', input_size=(3, 512, 512), num_classes=0, ), 'vit_pe_spatial_small_patch16_512.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Spatial-S16-512', #hf_hub_filename='PE-Spatial-S16-512.pt', input_size=(3, 512, 512), num_classes=0, ), 'vit_pe_spatial_base_patch16_512.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Spatial-B16-512', #hf_hub_filename='PE-Spatial-B16-512.pt', input_size=(3, 512, 512), num_classes=0, ), 'vit_pe_spatial_large_patch14_448.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Spatial-L14-448', #hf_hub_filename='PE-Spatial-L14-448.pt', input_size=(3, 448, 448), num_classes=0, ), 'vit_pe_spatial_gigantic_patch14_448.fb': _pe_cfg( hf_hub_id='timm/', #hf_hub_id='facebook/PE-Spatial-G14-448', #hf_hub_filename='PE-Spatial-G14-448.pt', input_size=(3, 448, 448), num_classes=0, ), # RoPE-ViT models from Naver 'vit_small_patch16_rope_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_base_patch16_rope_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_large_patch16_rope_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_small_patch16_rope_mixed_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_base_patch16_rope_mixed_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, 
std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_large_patch16_rope_mixed_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_small_patch16_rope_ape_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_base_patch16_rope_ape_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_large_patch16_rope_ape_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_small_patch16_rope_mixed_ape_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_base_patch16_rope_mixed_ape_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), 'vit_large_patch16_rope_mixed_ape_224.naver_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, license='apache-2.0', ), }) @register_model def eva_giant_patch14_224(pretrained: bool = False, **kwargs) -> Eva: """EVA-g model https://arxiv.org/abs/2211.07636""" model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_336(pretrained: bool = False, **kwargs) -> Eva: """EVA-g model https://arxiv.org/abs/2211.07636""" model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_560(pretrained: bool = False, **kwargs) -> Eva: """EVA-g model https://arxiv.org/abs/2211.07636""" model_args = dict(patch_size=14, embed_dim=1408, depth=40, 
        num_heads=16, mlp_ratio=6144 / 1408)
    model = _create_eva('eva_giant_patch14_560', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_tiny_patch14_224(pretrained: bool = False, **kwargs) -> Eva:
    """EVA02 Tiny https://arxiv.org/abs/2303.11331"""
    model_args = dict(
        img_size=224,
        patch_size=14,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4 * 2 / 3,  # SwiGLU ratio compensates for the gated MLP's extra projection
        swiglu_mlp=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
    )
    model = _create_eva('eva02_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_small_patch14_224(pretrained: bool = False, **kwargs) -> Eva:
    """EVA02 Small https://arxiv.org/abs/2303.11331"""
    model_args = dict(
        img_size=224,
        patch_size=14,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4 * 2 / 3,
        swiglu_mlp=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
    )
    model = _create_eva('eva02_small_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_base_patch14_224(pretrained: bool = False, **kwargs) -> Eva:
    """EVA02 Base https://arxiv.org/abs/2303.11331"""
    model_args = dict(
        img_size=224,
        patch_size=14,
        embed_dim=768,
        depth=12,
        num_heads=12,
        qkv_fused=False,  # base/large EVA02 use separate q/k/v projections
        mlp_ratio=4 * 2 / 3,
        swiglu_mlp=True,
        scale_mlp=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
    )
    model = _create_eva('eva02_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_large_patch14_224(pretrained: bool = False, **kwargs) -> Eva:
    """EVA02 Large https://arxiv.org/abs/2303.11331"""
    model_args = dict(
        img_size=224,
        patch_size=14,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4 * 2 / 3,
        qkv_fused=False,
        swiglu_mlp=True,
        scale_mlp=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
    )
    model = _create_eva('eva02_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_tiny_patch14_336(pretrained: bool = False, **kwargs) -> Eva:
    """EVA02 Tiny https://arxiv.org/abs/2303.11331"""
    model_args = dict(
        img_size=336,
        patch_size=14,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4 * 2 / 3,
        swiglu_mlp=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
    )
    model = _create_eva('eva02_tiny_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_small_patch14_336(pretrained: bool = False, **kwargs) -> Eva:
    """EVA02 Small https://arxiv.org/abs/2303.11331"""
    model_args = dict(
        img_size=336,
        patch_size=14,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4 * 2 / 3,
        swiglu_mlp=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
    )
    model = _create_eva('eva02_small_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_base_patch14_448(pretrained: bool = False, **kwargs) -> Eva:
    """EVA02 Base https://arxiv.org/abs/2303.11331"""
    model_args = dict(
        img_size=448,
        patch_size=14,
        embed_dim=768,
        depth=12,
        num_heads=12,
        qkv_fused=False,
        mlp_ratio=4 * 2 / 3,
        swiglu_mlp=True,
        scale_mlp=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
    )
    model = _create_eva('eva02_base_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_large_patch14_448(pretrained: bool = False, **kwargs) -> Eva:
    """EVA02 Large https://arxiv.org/abs/2303.11331"""
    model_args = dict(
        img_size=448,
        patch_size=14,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4 * 2 / 3,
        qkv_fused=False,
        swiglu_mlp=True,
        scale_mlp=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
    )
    model = _create_eva('eva02_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva_giant_patch14_clip_224(pretrained: bool = False, **kwargs) -> Eva:
    """EVA-g CLIP model (only difference from non-CLIP is the pooling)"""
    model_args = dict(
        patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408,
        global_pool=kwargs.pop('global_pool', 'token'))
    model = _create_eva('eva_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_base_patch16_clip_224(pretrained: bool = False, **kwargs) -> Eva:
    """An EVA-CLIP specific variant that adds additional attn scale layer-norm to eva02_base"""
    model_args = dict(
        img_size=224,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        qkv_fused=False,
        mlp_ratio=4 * 2 / 3,
        swiglu_mlp=True,
        scale_mlp=True,
        scale_attn_inner=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
        global_pool=kwargs.pop('global_pool', 'token'),
    )
    model = _create_eva('eva02_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_large_patch14_clip_224(pretrained: bool = False, **kwargs) -> Eva:
    """An EVA-CLIP specific variant that adds additional attn scale layer-norm to eva02_large"""
    model_args = dict(
        img_size=224,
        patch_size=14,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4 * 2 / 3,
        qkv_fused=False,
        swiglu_mlp=True,
        scale_mlp=True,
        scale_attn_inner=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
        global_pool=kwargs.pop('global_pool', 'token'),
    )
    model = _create_eva('eva02_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_large_patch14_clip_336(pretrained: bool = False, **kwargs) -> Eva:
    """An EVA-CLIP specific variant that adds additional attn scale layer-norm to eva02_large"""
    model_args = dict(
        img_size=336,
        patch_size=14,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4 * 2 / 3,
        qkv_fused=False,
        swiglu_mlp=True,
        scale_mlp=True,
        scale_attn_inner=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(16, 16),  # 224/14
        global_pool=kwargs.pop('global_pool', 'token'),
    )
    model = _create_eva('eva02_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def eva02_enormous_patch14_clip_224(pretrained: bool = False, **kwargs) -> Eva:
    """An EVA-CLIP specific variant that uses residual post-norm in blocks"""
    model_args = dict(
        img_size=224,
        patch_size=14,
        embed_dim=1792,
        depth=64,
        num_heads=16,
        mlp_ratio=15360 / 1792,
        use_post_norm=True,
        global_pool=kwargs.pop('global_pool', 'token'),
    )
    model = _create_eva('eva02_enormous_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_medium_patch16_rope_reg1_gap_256(pretrained: bool = False, **kwargs) -> Eva:
    """timm SBB ViT with ROPE"""
    model_args = dict(
        img_size=256,
        patch_size=16,
        embed_dim=512,
        depth=12,
        num_heads=8,
        qkv_fused=True,
        qkv_bias=True,
        init_values=1e-5,
        class_token=False,
        num_reg_tokens=1,
        use_rot_pos_emb=True,
        use_abs_pos_emb=False,
        ref_feat_shape=(16, 16),  # 256/16
    )
    model = _create_eva('vit_medium_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_mediumd_patch16_rope_reg1_gap_256(pretrained: bool = False, **kwargs) -> Eva:
    """timm SBB ViT with ROPE"""
    model_args = dict(
        img_size=256,
        patch_size=16,
        embed_dim=512,
        depth=20,
        num_heads=8,
        qkv_fused=True,
        qkv_bias=False,
        init_values=1e-5,
        class_token=False,
        num_reg_tokens=1,
        use_rot_pos_emb=True,
        use_abs_pos_emb=False,
        ref_feat_shape=(16, 16),  # 256/16
    )
    model = _create_eva('vit_mediumd_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_betwixt_patch16_rope_reg4_gap_256(pretrained: bool = False, **kwargs) -> Eva:
    """timm SBB ViT with ROPE"""
    model_args = dict(
        img_size=256,
        patch_size=16,
        embed_dim=640,
        depth=12,
        num_heads=10,
        qkv_fused=True,
        qkv_bias=True,
        init_values=1e-5,
        class_token=False,
        num_reg_tokens=4,
        use_rot_pos_emb=True,
        use_abs_pos_emb=False,
        ref_feat_shape=(16, 16),  # 256/16
    )
    model = _create_eva('vit_betwixt_patch16_rope_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
@register_model
def vit_base_patch16_rope_reg1_gap_256(pretrained: bool = False, **kwargs) -> Eva:
    """timm SBB ViT-Base with ROPE: 256x256 input, patch 16, 1 register token, GAP head (no class token)."""
    model_args = dict(
        img_size=256,
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        qkv_fused=True,
        qkv_bias=True,
        init_values=1e-5,  # layer-scale init
        class_token=False,
        num_reg_tokens=1,
        use_rot_pos_emb=True,
        use_abs_pos_emb=False,  # ROPE only, no absolute position embedding
        ref_feat_shape=(16, 16),  # 256/16
    )
    model = _create_eva('vit_base_patch16_rope_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_pe_core_tiny_patch16_384(pretrained: bool = False, **kwargs) -> Eva:
    """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)"""
    model_args = dict(
        patch_size=16,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4.0,
        global_pool='map',  # attention-pool (latent) head
        attn_type='rope',
        use_pre_transformer_norm=True,  # PE applies a norm before the transformer blocks
        use_rot_pos_emb=True,
        ref_feat_shape=(24, 24),  # 384/16
        rope_grid_offset=1.,
        rope_grid_indexing='xy',
        attn_pool_num_heads=8,
        attn_pool_mlp_ratio=4.,
        norm_layer=partial(LayerNorm, eps=1e-5),
        #dynamic_img_size=True
    )
    return _create_eva('vit_pe_core_tiny_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def vit_pe_core_small_patch16_384(pretrained: bool = False, **kwargs) -> Eva:
    """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)"""
    model_args = dict(
        patch_size=16,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4.0,
        global_pool='map',  # attention-pool (latent) head
        attn_type='rope',
        use_pre_transformer_norm=True,
        use_rot_pos_emb=True,
        ref_feat_shape=(24, 24),  # 384/16
        rope_grid_offset=1.,
        rope_grid_indexing='xy',
        attn_pool_num_heads=8,
        attn_pool_mlp_ratio=4.,
        norm_layer=partial(LayerNorm, eps=1e-5),
        #dynamic_img_size=True
    )
    return _create_eva('vit_pe_core_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))


@register_model
def vit_pe_core_base_patch16_224(pretrained: bool = False, **kwargs) -> Eva:
    """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)"""
    model_args = dict(
        patch_size=16,
embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, global_pool='map', attn_type='rope', use_pre_transformer_norm=True, use_rot_pos_emb=True, ref_feat_shape=(14, 14), rope_grid_offset=1., rope_grid_indexing='xy', attn_pool_num_heads=8, attn_pool_mlp_ratio=4., norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True ) return _create_eva('vit_pe_core_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_core_large_patch14_336(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4.0, global_pool='map', attn_type='rope', use_pre_transformer_norm=True, use_rot_pos_emb=True, ref_feat_shape=(24, 24), rope_grid_offset=1., rope_grid_indexing='xy', attn_pool_num_heads=8, attn_pool_mlp_ratio=4., norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True, ) return _create_eva('vit_pe_core_large_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_core_gigantic_patch14_448(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=14, embed_dim=1536, depth=50, num_heads=16, mlp_ratio=8960 / 1536, global_pool='map', attn_type='rope', class_token=False, use_pre_transformer_norm=True, use_rot_pos_emb=True, ref_feat_shape=(32, 32), rope_grid_indexing='xy', attn_pool_num_heads=8, attn_pool_mlp_ratio=4., norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True, ) return _create_eva('vit_pe_core_gigantic_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_lang_large_patch14_448(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=14, embed_dim=1024, depth=23, num_heads=16, mlp_ratio=4.0, 
attn_type='rope', class_token=True, use_rot_pos_emb=True, ref_feat_shape=(32, 32), rope_grid_offset=1., rope_grid_indexing='xy', use_pre_transformer_norm=True, use_post_transformer_norm=False, use_fc_norm=False, # explicitly disable init_values=0.1, norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True, ) return _create_eva('vit_pe_lang_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_lang_gigantic_patch14_448(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=14, embed_dim=1536, depth=47, num_heads=16, mlp_ratio=8960 / 1536, attn_type='rope', class_token=False, use_rot_pos_emb=True, ref_feat_shape=(32, 32), rope_grid_indexing='xy', use_pre_transformer_norm=True, use_post_transformer_norm=False, use_fc_norm=False, # explicitly disable init_values=0.1, norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True, ) return _create_eva('vit_pe_lang_gigantic_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_spatial_tiny_patch16_512(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4.0, attn_type='rope', use_pre_transformer_norm=True, use_post_transformer_norm=False, use_fc_norm=False, # explicitly disable use_rot_pos_emb=True, ref_feat_shape=(32, 32), rope_grid_offset=1., rope_grid_indexing='xy', norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True ) return _create_eva('vit_pe_spatial_tiny_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_spatial_small_patch16_512(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, 
mlp_ratio=4.0, attn_type='rope', use_pre_transformer_norm=True, use_post_transformer_norm=False, use_fc_norm=False, # explicitly disable use_rot_pos_emb=True, ref_feat_shape=(32, 32), rope_grid_offset=1., rope_grid_indexing='xy', norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True ) return _create_eva('vit_pe_spatial_small_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_spatial_base_patch16_512(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, attn_type='rope', use_pre_transformer_norm=True, use_post_transformer_norm=False, use_fc_norm=False, # explicitly disable use_rot_pos_emb=True, ref_feat_shape=(32, 32), rope_grid_offset=1., rope_grid_indexing='xy', norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True ) return _create_eva('vit_pe_spatial_base_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_spatial_large_patch14_448(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4.0, attn_type='rope', use_pre_transformer_norm=True, use_post_transformer_norm=False, use_fc_norm=False, # explicitly disable use_rot_pos_emb=True, ref_feat_shape=(32, 32), rope_grid_offset=1., rope_grid_indexing='xy', norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True, ) return _create_eva('vit_pe_spatial_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def vit_pe_spatial_gigantic_patch14_448(pretrained: bool = False, **kwargs) -> Eva: """Perception Encoder (PE) ViT from Meta (https://arxiv.org/abs/2504.13181)""" model_args = dict( patch_size=14, embed_dim=1536, depth=50, num_heads=16, mlp_ratio=8960 / 1536, 
attn_type='rope', class_token=False, use_rot_pos_emb=True, ref_feat_shape=(32, 32), rope_grid_indexing='xy', use_pre_transformer_norm=True, use_post_transformer_norm=False, use_fc_norm=False, # explicitly disable init_values=0.1, norm_layer=partial(LayerNorm, eps=1e-5), #dynamic_img_size=True, ) return _create_eva('vit_pe_spatial_gigantic_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) # RoPE-ViT models from https://github.com/naver-ai/rope-vit @register_model def vit_small_patch16_rope_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Axial ViT-S/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', use_abs_pos_emb=False, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=100.0, ) model = _create_eva('vit_small_patch16_rope_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_rope_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Axial ViT-B/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, attn_type='rope', use_fc_norm=False, qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', use_abs_pos_emb=False, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=100.0, ) model = _create_eva('vit_base_patch16_rope_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_rope_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Axial ViT-L/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', use_abs_pos_emb=False, use_rot_pos_emb=True, rope_grid_indexing='xy', 
rope_temperature=100.0, ) model = _create_eva('vit_large_patch16_rope_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_rope_mixed_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Mixed ViT-S/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', use_abs_pos_emb=False, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=10.0, rope_mixed_mode=True, ) model = _create_eva('vit_small_patch16_rope_mixed_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_rope_mixed_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Mixed ViT-B/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, attn_type='rope', init_values=1e-5, class_token=True, global_pool='token', use_abs_pos_emb=False, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=10.0, rope_mixed_mode=True, ) model = _create_eva('vit_base_patch16_rope_mixed_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_rope_mixed_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Mixed ViT-L/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', use_abs_pos_emb=False, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=10.0, rope_mixed_mode=True, ) model = _create_eva('vit_large_patch16_rope_mixed_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model # APE variants (with absolute position embeddings) @register_model def vit_small_patch16_rope_ape_224(pretrained: bool 
= False, **kwargs) -> Eva: """RoPE-Axial + APE ViT-S/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', no_embed_class=True, use_abs_pos_emb=True, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=100.0, ) model = _create_eva('vit_small_patch16_rope_ape_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_rope_ape_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Axial + APE ViT-B/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', no_embed_class=True, use_abs_pos_emb=True, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=100.0, ) model = _create_eva('vit_base_patch16_rope_ape_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_rope_ape_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Axial + APE ViT-L/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', no_embed_class=True, use_abs_pos_emb=True, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=100.0, ) model = _create_eva('vit_large_patch16_rope_ape_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_rope_mixed_ape_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Mixed + APE ViT-S/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, 
class_token=True, global_pool='token', no_embed_class=True, use_abs_pos_emb=True, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=10.0, rope_mixed_mode=True, ) model = _create_eva('vit_small_patch16_rope_mixed_ape_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_rope_mixed_ape_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Mixed + APE ViT-B/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', no_embed_class=True, use_abs_pos_emb=True, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=10.0, rope_mixed_mode=True, ) model = _create_eva('vit_base_patch16_rope_mixed_ape_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_rope_mixed_ape_224(pretrained: bool = False, **kwargs) -> Eva: """RoPE-Mixed + APE ViT-L/16 from https://github.com/naver-ai/rope-vit""" model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, attn_type='rope', qkv_bias=True, init_values=1e-5, class_token=True, global_pool='token', no_embed_class=True, use_abs_pos_emb=True, use_rot_pos_emb=True, rope_grid_indexing='xy', rope_temperature=10.0, rope_mixed_mode=True, ) model = _create_eva('vit_large_patch16_rope_mixed_ape_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/eva.py/0
{ "file_path": "pytorch-image-models/timm/models/eva.py", "repo_id": "pytorch-image-models", "token_count": 47165 }
260
""" InceptionNeXt paper: https://arxiv.org/abs/2303.16900 Original implementation & weights from: https://github.com/sail-sg/inceptionnext """ from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_, DropPath, to_2tuple, get_padding, SelectAdaptivePool2d from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['MetaNeXt'] class InceptionDWConv2d(nn.Module): """ Inception depthwise convolution """ def __init__( self, in_chs, square_kernel_size=3, band_kernel_size=11, branch_ratio=0.125, dilation=1, ): super().__init__() gc = int(in_chs * branch_ratio) # channel numbers of a convolution branch square_padding = get_padding(square_kernel_size, dilation=dilation) band_padding = get_padding(band_kernel_size, dilation=dilation) self.dwconv_hw = nn.Conv2d( gc, gc, square_kernel_size, padding=square_padding, dilation=dilation, groups=gc) self.dwconv_w = nn.Conv2d( gc, gc, (1, band_kernel_size), padding=(0, band_padding), dilation=(1, dilation), groups=gc) self.dwconv_h = nn.Conv2d( gc, gc, (band_kernel_size, 1), padding=(band_padding, 0), dilation=(dilation, 1), groups=gc) self.split_indexes = (in_chs - 3 * gc, gc, gc, gc) def forward(self, x): x_id, x_hw, x_w, x_h = torch.split(x, self.split_indexes, dim=1) return torch.cat(( x_id, self.dwconv_hw(x_hw), self.dwconv_w(x_w), self.dwconv_h(x_h) ), dim=1, ) class ConvMlp(nn.Module): """ MLP using 1x1 convs that keeps spatial dims copied from timm: https://github.com/huggingface/pytorch-image-models/blob/v0.6.11/timm/models/layers/mlp.py """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or 
in_features
        hidden_features = hidden_features or in_features
        bias = to_2tuple(bias)  # allow independent bias flags for fc1 / fc2
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.drop = nn.Dropout(drop)
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])

    def forward(self, x):
        # 1x1 conv MLP applied per spatial position; input/output are (B, C, H, W)
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        return x


class MlpClassifierHead(nn.Module):
    """ MLP classification head

    Global pool -> Linear expand (mlp_ratio) -> act -> norm -> dropout -> Linear classifier.
    """
    def __init__(
            self,
            in_features,
            num_classes=1000,
            pool_type='avg',
            mlp_ratio=3,
            act_layer=nn.GELU,
            norm_layer=partial(nn.LayerNorm, eps=1e-6),
            drop=0.,
            bias=True
    ):
        super().__init__()
        self.use_conv = False
        self.in_features = in_features
        # num_features is the hidden (pre-logits) width exposed to users of the head
        self.num_features = hidden_features = int(mlp_ratio * in_features)
        assert pool_type, 'Cannot disable pooling'
        self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
        self.fc1 = nn.Linear(in_features * self.global_pool.feat_mult(), hidden_features, bias=bias)
        self.act = act_layer()
        self.norm = norm_layer(hidden_features)
        self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
        self.drop = nn.Dropout(drop)

    def reset(self, num_classes: int, pool_type: Optional[str] = None):
        """Re-create classifier (and optionally pooling) for a new number of classes."""
        if pool_type is not None:
            assert pool_type, 'Cannot disable pooling'
            self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
        # num_classes == 0 removes the classifier, leaving pre-logits output
        self.fc2 = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, x, pre_logits: bool = False):
        x = self.global_pool(x)
        x = self.fc1(x)
        x = self.act(x)
        x = self.norm(x)
        x = self.drop(x)
        # pre_logits returns the hidden representation before the final classifier
        return x if pre_logits else self.fc2(x)


class MetaNeXtBlock(nn.Module):
    """ MetaNeXtBlock Block
    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
        ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
""" def __init__( self, dim, dilation=1, token_mixer=InceptionDWConv2d, norm_layer=nn.BatchNorm2d, mlp_layer=ConvMlp, mlp_ratio=4, act_layer=nn.GELU, ls_init_value=1e-6, drop_path=0., ): super().__init__() self.token_mixer = token_mixer(dim, dilation=dilation) self.norm = norm_layer(dim) self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value else None self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x x = self.token_mixer(x) x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = x.mul(self.gamma.reshape(1, -1, 1, 1)) x = self.drop_path(x) + shortcut return x class MetaNeXtStage(nn.Module): def __init__( self, in_chs, out_chs, stride=2, depth=2, dilation=(1, 1), drop_path_rates=None, ls_init_value=1.0, token_mixer=InceptionDWConv2d, act_layer=nn.GELU, norm_layer=None, mlp_ratio=4, ): super().__init__() self.grad_checkpointing = False if stride > 1 or dilation[0] != dilation[1]: self.downsample = nn.Sequential( norm_layer(in_chs), nn.Conv2d( in_chs, out_chs, kernel_size=2, stride=stride, dilation=dilation[0], ), ) else: self.downsample = nn.Identity() drop_path_rates = drop_path_rates or [0.] * depth stage_blocks = [] for i in range(depth): stage_blocks.append(MetaNeXtBlock( dim=out_chs, dilation=dilation[1], drop_path=drop_path_rates[i], ls_init_value=ls_init_value, token_mixer=token_mixer, act_layer=act_layer, norm_layer=norm_layer, mlp_ratio=mlp_ratio, )) self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class MetaNeXt(nn.Module): r""" MetaNeXt A PyTorch impl of : `InceptionNeXt: When Inception Meets ConvNeXt` - https://arxiv.org/abs/2303.16900 Args: in_chans (int): Number of input image channels. 
Default: 3 num_classes (int): Number of classes for classification head. Default: 1000 depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 9, 3) dims (tuple(int)): Feature dimension at each stage. Default: (96, 192, 384, 768) token_mixers: Token mixer function. Default: nn.Identity norm_layer: Normalization layer. Default: nn.BatchNorm2d act_layer: Activation function for MLP. Default: nn.GELU mlp_ratios (int or tuple(int)): MLP ratios. Default: (4, 4, 4, 3) drop_rate (float): Head dropout rate drop_path_rate (float): Stochastic depth rate. Default: 0. ls_init_value (float): Init value for Layer Scale. Default: 1e-6. """ def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), token_mixers=InceptionDWConv2d, norm_layer=nn.BatchNorm2d, act_layer=nn.GELU, mlp_ratios=(4, 4, 4, 3), drop_rate=0., drop_path_rate=0., ls_init_value=1e-6, ): super().__init__() num_stage = len(depths) if not isinstance(token_mixers, (list, tuple)): token_mixers = [token_mixers] * num_stage if not isinstance(mlp_ratios, (list, tuple)): mlp_ratios = [mlp_ratios] * num_stage self.num_classes = num_classes self.global_pool = global_pool self.drop_rate = drop_rate self.feature_info = [] self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4), norm_layer(dims[0]) ) dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] prev_chs = dims[0] curr_stride = 4 dilation = 1 # feature resolution stages, each consisting of multiple residual blocks self.stages = nn.Sequential() for i in range(num_stage): stride = 2 if curr_stride == 2 or i > 0 else 1 if curr_stride >= output_stride and stride > 1: dilation *= stride stride = 1 curr_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 out_chs = dims[i] self.stages.append(MetaNeXtStage( prev_chs, out_chs, stride=stride if i > 0 else 1, dilation=(first_dilation, dilation), depth=depths[i], 
drop_path_rates=dp_rates[i], ls_init_value=ls_init_value, act_layer=act_layer, token_mixer=token_mixers[i], norm_layer=norm_layer, mlp_ratio=mlp_ratios[i], )) prev_chs = out_chs self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] self.num_features = prev_chs self.head = MlpClassifierHead(self.num_features, num_classes, pool_type=self.global_pool, drop=drop_rate) self.head_hidden_size = self.head.num_features self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, (nn.Conv2d, nn.Linear)): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', None), ] ) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc2 def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def no_weight_decay(self): return set() def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. 
Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.stem(x) if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, 'avg') return take_indices def forward_features(self, x): x = self.stem(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc2', **kwargs } default_cfgs = generate_default_cfgs({ 'inception_next_atto.sail_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_atto.pth', ), 'inception_next_tiny.sail_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_tiny.pth', ), 'inception_next_small.sail_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_small.pth', ), 'inception_next_base.sail_in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base.pth', crop_pct=0.95, ), 'inception_next_base.sail_in1k_384': _cfg( hf_hub_id='timm/', # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base_384.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), }) def _create_inception_next(variant, pretrained=False, **kwargs): model = build_model_with_cfg( MetaNeXt, variant, pretrained, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs, ) return model @register_model def inception_next_atto(pretrained=False, 
**kwargs): model_args = dict( depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), token_mixers=partial(InceptionDWConv2d, band_kernel_size=9, branch_ratio=0.25) ) return _create_inception_next('inception_next_atto', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def inception_next_tiny(pretrained=False, **kwargs): model_args = dict( depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), token_mixers=InceptionDWConv2d, ) return _create_inception_next('inception_next_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def inception_next_small(pretrained=False, **kwargs): model_args = dict( depths=(3, 3, 27, 3), dims=(96, 192, 384, 768), token_mixers=InceptionDWConv2d, ) return _create_inception_next('inception_next_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def inception_next_base(pretrained=False, **kwargs): model_args = dict( depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024), token_mixers=InceptionDWConv2d, ) return _create_inception_next('inception_next_base', pretrained=pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/inception_next.py/0
{ "file_path": "pytorch-image-models/timm/models/inception_next.py", "repo_id": "pytorch-image-models", "token_count": 8611 }
261
""" Nested Transformer (NesT) in PyTorch A PyTorch implement of Aggregating Nested Transformers as described in: 'Aggregating Nested Transformers' - https://arxiv.org/abs/2105.12723 The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights have been converted with convert/convert_nest_flax.py Acknowledgments: * The paper authors for sharing their research, code, and model weights * Ross Wightman's existing code off which I based this Copyright 2021 Alexander Soare """ import collections.abc import logging import math from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq, named_apply from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Nest'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class Attention(nn.Module): """ This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with an extra "image block" dim """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): """ x is shape: B 
(batch_size), T (image blocks), N (seq length per image block), C (embed dim)
        """
        B, T, N, C = x.shape
        # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head)
        qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.fused_attn:
            # fused kernel path; dropout applied only in training mode
            x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.)
        else:
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)  # (B, H, T, N, N)
            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            x = attn @ v  # (B, H, T, N, C'), permute -> (B, T, N, C', H)

        # merge heads back into the channel dim
        x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x  # (B, T, N, C)


class TransformerLayer(nn.Module):
    """
    This is much like `.vision_transformer.Block` but:
        - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks")
        - Uses modified Attention layer that handles the "block" dimension
    """
    def __init__(
            self,
            dim,
            num_heads,
            mlp_ratio=4.,
            qkv_bias=False,
            proj_drop=0.,
            attn_drop=0.,
            drop_path=0.,
            act_layer=nn.GELU,
            norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        # pre-norm attention branch
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=proj_drop,
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x): y = self.norm1(x) x = x + self.drop_path(self.attn(y)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConvPool(nn.Module): def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): super().__init__() self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) self.norm = norm_layer(out_channels) self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) def forward(self, x): """ x is expected to have shape (B, C, H, W) """ _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') x = self.conv(x) # Layer norm done over channel dim only x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) x = self.pool(x) return x # (B, C, H//2, W//2) def blockify(x, block_size: int): """image to blocks Args: x (Tensor): with shape (B, H, W, C) block_size (int): edge length of a single square block in units of H, W """ B, H, W, C = x.shape _assert(H % block_size == 0, '`block_size` must divide input height evenly') _assert(W % block_size == 0, '`block_size` must divide input width evenly') grid_height = H // block_size grid_width = W // block_size x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) return x # (B, T, N, C) @register_notrace_function # reason: int receives Proxy def deblockify(x, block_size: int): """blocks to image Args: x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block block_size (int): edge length of a single square block in units of desired H, W """ B, T, _, C = x.shape grid_size = int(math.sqrt(T)) height = width = grid_size * 
block_size x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) x = x.transpose(2, 3).reshape(B, height, width, C) return x # (B, H, W, C) class NestLevel(nn.Module): """ Single hierarchical level of a Nested Transformer """ def __init__( self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, mlp_ratio=4., qkv_bias=True, proj_drop=0., attn_drop=0., drop_path=[], norm_layer=None, act_layer=None, pad_type='', ): super().__init__() self.block_size = block_size self.grad_checkpointing = False self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) if prev_embed_dim is not None: self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) else: self.pool = nn.Identity() # Transformer encoder if len(drop_path): assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers' self.transformer_encoder = nn.Sequential(*[ TransformerLayer( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer, act_layer=act_layer, ) for i in range(depth)]) def forward(self, x): """ expects x as (B, C, H, W) """ x = self.pool(x) x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer x = blockify(x, self.block_size) # (B, T, N, C') x = x + self.pos_embed if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.transformer_encoder, x) else: x = self.transformer_encoder(x) # (B, T, N, C') x = deblockify(x, self.block_size) # (B, H', W', C') # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage return x.permute(0, 3, 1, 2) # (B, C, H', W') class Nest(nn.Module): """ Nested Transformer (NesT) A PyTorch impl of : `Aggregating Nested Transformers` - https://arxiv.org/abs/2105.12723 """ def __init__( self, img_size=224, in_chans=3, patch_size=4, 
num_levels=3, embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, pad_type='', weight_init='', global_pool='avg', ): """ Args: img_size (int, tuple): input image size in_chans (int): number of input channels patch_size (int): patch size num_levels (int): number of block hierarchies (T_d in the paper) embed_dims (int, tuple): embedding dimensions of each level num_heads (int, tuple): number of attention heads for each level depths (int, tuple): number of transformer layers for each level num_classes (int): number of classes for classification head mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers qkv_bias (bool): enable bias for qkv if True drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate norm_layer: (nn.Module): normalization layer for transformer layers act_layer: (nn.Module): activation layer in MLP of transformer layers pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME weight_init: (str): weight init scheme global_pool: (str): type of pooling operation to apply to final feature map Notes: - Default values follow NesT-B from the original Jax code. - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. - For those following the paper, Table A1 may have errors! 
- https://github.com/google-research/nested-transformer/issues/2 """ super().__init__() for param_name in ['embed_dims', 'num_heads', 'depths']: param_value = locals()[param_name] if isinstance(param_value, collections.abc.Sequence): assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' embed_dims = to_ntuple(num_levels)(embed_dims) num_heads = to_ntuple(num_levels)(num_heads) depths = to_ntuple(num_levels)(depths) self.num_classes = num_classes self.num_features = self.head_hidden_size = embed_dims[-1] self.feature_info = [] norm_layer = norm_layer or LayerNorm act_layer = act_layer or nn.GELU self.drop_rate = drop_rate self.num_levels = num_levels if isinstance(img_size, collections.abc.Sequence): assert img_size[0] == img_size[1], 'Model only handles square inputs' img_size = img_size[0] assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' self.patch_size = patch_size # Number of blocks at each level self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' # Block edge size in units of patches # Hint: (img_size // patch_size) gives number of patches along edge of image. 
sqrt(self.num_blocks[0]) is the # number of blocks along edge of image self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) # Patch embedding self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False, ) self.num_patches = self.patch_embed.num_patches self.seq_length = self.num_patches // self.num_blocks[0] # Build up each hierarchical level levels = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] prev_dim = None curr_stride = 4 for i in range(len(self.num_blocks)): dim = embed_dims[i] levels.append(NestLevel( self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dp_rates[i], norm_layer=norm_layer, act_layer=act_layer, pad_type=pad_type, )) self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] prev_dim = dim curr_stride *= 2 self.levels = nn.Sequential(*levels) # Final normalization layer self.norm = norm_layer(embed_dims[-1]) # Classifier global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.head = head self.init_weights(weight_init) @torch.jit.ignore def init_weights(self, mode=''): assert mode in ('nlhb', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. 
for level in self.levels: trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) named_apply(partial(_init_nest_weights, head_bias=head_bias), self) @torch.jit.ignore def no_weight_decay(self): return {f'level.{i}.pos_embed' for i in range(len(self.levels))} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', # stem and embed blocks=[ (r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None), (r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)), (r'^norm', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.levels: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.head = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' 
intermediates = [] take_indices, max_index = feature_take_indices(len(self.levels), indices) # forward pass x = self.patch_embed(x) last_idx = len(self.num_blocks) - 1 if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.levels else: stages = self.levels[:max_index + 1] for feat_idx, stage in enumerate(stages): x = stage(x) if feat_idx in take_indices: if norm and feat_idx == last_idx: x_inter = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) intermediates.append(x_inter) else: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == last_idx: # Layer norm done over channel dim only (to NHWC and back) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(len(self.levels), indices) self.levels = self.levels[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x) x = self.levels(x) # Layer norm done over channel dim only (to NHWC and back) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): """ NesT weight initialization Can replicate Jax implementation. 
Otherwise follows vision_transformer.py """ if isinstance(module, nn.Linear): if name.startswith('head'): trunc_normal_(module.weight, std=.02, a=-2, b=2) nn.init.constant_(module.bias, head_bias) else: trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) def resize_pos_embed(posemb, posemb_new): """ Rescale the grid of position embeddings when loading from state_dict Expected shape of position embeddings is (1, T, N, C), and considers only square images """ _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) seq_length_old = posemb.shape[2] num_blocks_new, seq_length_new = posemb_new.shape[1:3] size_new = int(math.sqrt(num_blocks_new*seq_length_new)) # First change to (1, C, H, W) posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) # Now change to new (1, T, N, C) posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) return posemb def checkpoint_filter_fn(state_dict, model): """ resize positional embeddings of pretrained weights """ pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] for k in pos_embed_keys: if state_dict[k].shape != getattr(model, k).shape: state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) return state_dict def _create_nest(variant, pretrained=False, **kwargs): model = build_model_with_cfg( Nest, variant, pretrained, feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, 
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'nest_base.untrained': _cfg(), 'nest_small.untrained': _cfg(), 'nest_tiny.untrained': _cfg(), # (weights from official Google JAX impl, require 'SAME' padding) 'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def nest_base(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) return model @register_model def nest_base_jx(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small_jx(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs) return model @register_model def 
nest_tiny_jx(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs) return model register_model_deprecations(__name__, { 'jx_nest_base': 'nest_base_jx', 'jx_nest_small': 'nest_small_jx', 'jx_nest_tiny': 'nest_tiny_jx', })
pytorch-image-models/timm/models/nest.py/0
{ "file_path": "pytorch-image-models/timm/models/nest.py", "repo_id": "pytorch-image-models", "token_count": 11329 }
262
"""PyTorch SelecSLS Net example for ImageNet Classification License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) Author: Dushyant Mehta (@mehtadushy) SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera, Mehta et al." https://arxiv.org/abs/1907.00837 Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch """ from typing import List import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SelecSls'] # model_registry will add each entrypoint fn to this class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (List[torch.Tensor]) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (List[torch.Tensor]) pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x class SelectSeq(nn.Module): def __init__(self, mode='index', index=0): super(SelectSeq, self).__init__() self.mode = mode self.index = index @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor]) -> (torch.Tensor) pass def forward(self, x) -> torch.Tensor: if self.mode == 'index': return x[self.index] else: return torch.cat(x, dim=1) def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): if padding is None: padding = ((stride - 1) + dilation * (k - 1)) // 2 return nn.Sequential( nn.Conv2d(in_chs, out_chs, k, stride, 
padding=padding, dilation=dilation, bias=False), nn.BatchNorm2d(out_chs), nn.ReLU(inplace=True) ) class SelecSlsBlock(nn.Module): def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): super(SelecSlsBlock, self).__init__() self.stride = stride self.is_first = is_first assert stride in [1, 2] # Process input with 4 conv blocks with the same number of input and output channels self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) self.conv2 = conv_bn(mid_chs, mid_chs, 1) self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if not isinstance(x, list): x = [x] assert len(x) in [1, 2] d1 = self.conv1(x[0]) d2 = self.conv3(self.conv2(d1)) d3 = self.conv5(self.conv4(d2)) if self.is_first: out = self.conv6(torch.cat([d1, d2, d3], 1)) return [out, out] else: return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] class SelecSls(nn.Module): """SelecSls42 / SelecSls60 / SelecSls84 Parameters ---------- cfg : network config dictionary specifying block type, feature, and head args num_classes : int, default 1000 Number of classification classes. in_chans : int, default 3 Number of input (color) channels. drop_rate : float, default 0. Dropout probability before classifier, for training global_pool : str, default 'avg' Global pooling type. 
One of 'avg', 'max', 'avgmax', 'catavgmax' """ def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): self.num_classes = num_classes super(SelecSls, self).__init__() self.stem = conv_bn(in_chans, 32, stride=2) self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) self.num_features = self.head_hidden_size = cfg['num_features'] self.feature_info = cfg['feature_info'] self.global_pool, self.head_drop, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) for n, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^features\.(\d+)', blocks_head=r'^head' ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.fc def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.features(x) x = self.head(self.from_seq(x)) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_selecsls(variant, pretrained, **kwargs): cfg = {} feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] if variant.startswith('selecsls42'): cfg['block'] = SelecSlsBlock # Define configuration of the network after the 
initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 144, 144, True, 2), (144, 144, 144, 288, False, 1), (288, 0, 304, 304, True, 2), (304, 304, 304, 480, False, 1), ] feature_info.extend([ dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, reduction=8, module='features.3'), dict(num_chs=480, reduction=16, module='features.5'), ]) # Head can be replaced with alternative configurations depending on the problem feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) if variant == 'selecsls42b': cfg['head'] = [ (480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1), ] feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) cfg['num_features'] = 1024 else: cfg['head'] = [ (480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1), ] feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) cfg['num_features'] = 1280 elif variant.startswith('selecsls60'): cfg['block'] = SelecSlsBlock # Define configuration of the network after the initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 128, 128, True, 2), (128, 128, 128, 128, False, 1), (128, 128, 128, 288, False, 1), (288, 0, 288, 288, True, 2), (288, 288, 288, 288, False, 1), (288, 288, 288, 288, False, 1), (288, 288, 288, 416, False, 1), ] feature_info.extend([ dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, reduction=8, module='features.4'), dict(num_chs=416, reduction=16, module='features.8'), ]) # Head can be replaced with alternative configurations depending on the problem feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) if variant == 'selecsls60b': cfg['head'] = [ (416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1), ] 
feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) cfg['num_features'] = 1024 else: cfg['head'] = [ (416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1), ] feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) cfg['num_features'] = 1280 elif variant == 'selecsls84': cfg['block'] = SelecSlsBlock # Define configuration of the network after the initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 144, False, 1), (144, 0, 144, 144, True, 2), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 304, False, 1), (304, 0, 304, 304, True, 2), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 512, False, 1), ] feature_info.extend([ dict(num_chs=144, reduction=4, module='features.1'), dict(num_chs=304, reduction=8, module='features.6'), dict(num_chs=512, reduction=16, module='features.12'), ]) # Head can be replaced with alternative configurations depending on the problem cfg['head'] = [ (512, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 3, 1), ] cfg['num_features'] = 1280 feature_info.extend([ dict(num_chs=1024, reduction=32, module='head.1'), dict(num_chs=1280, reduction=64, module='head.3') ]) else: raise ValueError('Invalid net configuration ' + variant + ' !!!') cfg['feature_info'] = feature_info # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? 
return build_model_with_cfg( SelecSls, variant, pretrained, model_cfg=cfg, feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'selecsls42.untrained': _cfg( interpolation='bicubic'), 'selecsls42b.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls60.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls60b.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls84.untrained': _cfg( interpolation='bicubic'), }) @register_model def selecsls42(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls42 model. """ return _create_selecsls('selecsls42', pretrained, **kwargs) @register_model def selecsls42b(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls42_B model. """ return _create_selecsls('selecsls42b', pretrained, **kwargs) @register_model def selecsls60(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls60 model. """ return _create_selecsls('selecsls60', pretrained, **kwargs) @register_model def selecsls60b(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls60_B model. """ return _create_selecsls('selecsls60b', pretrained, **kwargs) @register_model def selecsls84(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls84 model. """ return _create_selecsls('selecsls84', pretrained, **kwargs)
pytorch-image-models/timm/models/selecsls.py/0
{ "file_path": "pytorch-image-models/timm/models/selecsls.py", "repo_id": "pytorch-image-models", "token_count": 6452 }
263
""" Vision Transformer (ViT) in PyTorch A PyTorch implement of Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https://arxiv.org/abs/2106.10270 `FlexiViT: One Model for All Patch Sizes` - https://arxiv.org/abs/2212.08013 The official jax code is released and available at * https://github.com/google-research/vision_transformer * https://github.com/google-research/big_vision Acknowledgments: * The paper authors for releasing code and weights, thanks! * I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT * Bert reference code checks against Huggingface Transformers and Tensorflow Bert Hacked together by / Copyright 2020, Ross Wightman """ import copy import logging import math import os from collections import OrderedDict from functools import partial from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, Union, List try: from typing import Literal except ImportError: from typing_extensions import Literal import torch import torch.nn as nn import torch.nn.functional as F from torch.jit import Final from timm.data import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD ) from timm.layers import ( Attention, AttentionPoolLatent, PatchEmbed, Mlp, SwiGLUPacked, SwiGLU, LayerNorm, RmsNorm, DropPath, PatchDropout, trunc_normal_, lecun_normal_, resample_patch_embed, resample_abs_pos_embed, use_fused_attn, get_act_layer, get_norm_layer, maybe_add_mask, LayerType, ) from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import named_apply, checkpoint, checkpoint_seq, adapt_input_conv from ._registry import 
generate_default_cfgs, register_model, register_model_deprecations __all__ = ['VisionTransformer'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class LayerScale(nn.Module): """Layer scale module. References: - https://arxiv.org/abs/2103.17239 """ def __init__( self, dim: int, init_values: float = 1e-5, inplace: bool = False, ) -> None: """Initialize LayerScale module. Args: dim: Dimension. init_values: Initial value for scaling. inplace: If True, perform inplace operations. """ super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x: torch.Tensor) -> torch.Tensor: """Apply layer scaling.""" return x.mul_(self.gamma) if self.inplace else x * self.gamma class Block(nn.Module): """Transformer block with pre-normalization.""" def __init__( self, dim: int, num_heads: int, mlp_ratio: float = 4., qkv_bias: bool = False, qk_norm: bool = False, scale_attn_norm: bool = False, scale_mlp_norm: bool = False, proj_bias: bool = True, proj_drop: float = 0., attn_drop: float = 0., init_values: Optional[float] = None, drop_path: float = 0., act_layer: Type[nn.Module] = nn.GELU, norm_layer: Type[nn.Module] = LayerNorm, mlp_layer: Type[nn.Module] = Mlp, ) -> None: """Initialize Block. Args: dim: Number of input channels. num_heads: Number of attention heads. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. qk_norm: If True, apply normalization to query and key. proj_bias: If True, add bias to output projection. proj_drop: Projection dropout rate. attn_drop: Attention dropout rate. init_values: Initial values for layer scale. drop_path: Stochastic depth rate. act_layer: Activation layer. norm_layer: Normalization layer. mlp_layer: MLP layer. 
""" super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, scale_norm=scale_attn_norm, proj_bias=proj_bias, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = mlp_layer( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer if scale_mlp_norm else None, bias=proj_bias, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None) -> torch.Tensor: x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), attn_mask=attn_mask))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ResPostBlock(nn.Module): def __init__( self, dim: int, num_heads: int, mlp_ratio: float = 4., qkv_bias: bool = False, qk_norm: bool = False, scale_attn_norm: bool = False, scale_mlp_norm: bool = False, proj_bias: bool = True, proj_drop: float = 0., attn_drop: float = 0., init_values: Optional[float] = None, drop_path: float = 0., act_layer: Type[nn.Module] = nn.GELU, norm_layer: Type[nn.Module] = LayerNorm, mlp_layer: Type[nn.Module] = Mlp, ) -> None: super().__init__() self.init_values = init_values self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, scale_norm=scale_attn_norm, proj_bias=proj_bias, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
            else nn.Identity()

        self.mlp = mlp_layer(
            in_features=dim,
            hidden_features=int(dim * mlp_ratio),
            act_layer=act_layer,
            norm_layer=norm_layer if scale_mlp_norm else None,
            bias=proj_bias,
            drop=proj_drop,
        )
        self.norm2 = norm_layer(dim)
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.init_weights()

    def init_weights(self) -> None:
        # NOTE this init overrides that base model init with specific changes for the block type
        if self.init_values is not None:
            # init_values seed the post-norm weights (plays the role of layer-scale here)
            nn.init.constant_(self.norm1.weight, self.init_values)
            nn.init.constant_(self.norm2.weight, self.init_values)

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Post-norm residual: normalize the branch output, then add to the residual."""
        x = x + self.drop_path1(self.norm1(self.attn(x, attn_mask=attn_mask)))
        x = x + self.drop_path2(self.norm2(self.mlp(x)))
        return x


class ParallelScalingBlock(nn.Module):
    """ Parallel ViT block (MLP & Attention in parallel)

    Based on:
      'Scaling Vision Transformers to 22 Billion Parameters` - https://arxiv.org/abs/2302.05442
    """
    fused_attn: Final[bool]

    def __init__(
            self,
            dim: int,
            num_heads: int,
            mlp_ratio: float = 4.,
            qkv_bias: bool = False,
            qk_norm: bool = False,
            scale_attn_norm: bool = False,
            scale_mlp_norm: bool = False,
            proj_bias: bool = True,
            proj_drop: float = 0.,
            attn_drop: float = 0.,
            init_values: Optional[float] = None,
            drop_path: float = 0.,
            act_layer: Type[nn.Module] = nn.GELU,
            norm_layer: Type[nn.Module] = LayerNorm,
            mlp_layer: Optional[Type[nn.Module]] = None,  # unused, kept for block_fn signature compat
    ) -> None:
        super().__init__()
        assert dim % num_heads == 0, 'dim should be divisible by num_heads'
        assert not scale_attn_norm and not scale_mlp_norm, 'Scale norms not supported'
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        self.fused_attn = use_fused_attn()
        mlp_hidden_dim = int(mlp_ratio * dim)
        # Single fused input projection produces mlp hidden + q + k + v in one matmul.
        in_proj_out_dim = mlp_hidden_dim + 3 * dim

        self.in_norm = norm_layer(dim)
        self.in_proj = nn.Linear(dim, in_proj_out_dim, bias=qkv_bias)
        self.in_split = [mlp_hidden_dim] + [dim] * 3
        if qkv_bias:
            # bias handled by in_proj itself; register None placeholders so attrs always exist
            self.register_buffer('qkv_bias', None)
            self.register_parameter('mlp_bias', None)
        else:
            # no qkv bias: keep a constant zero qkv bias buffer + trainable mlp-only bias
            self.register_buffer('qkv_bias', torch.zeros(3 * dim), persistent=False)
            self.mlp_bias = nn.Parameter(torch.zeros(mlp_hidden_dim))

        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.attn_drop = nn.Dropout(attn_drop)
        self.attn_out_proj = nn.Linear(dim, dim, bias=proj_bias)

        self.mlp_drop = nn.Dropout(proj_drop)
        self.mlp_act = act_layer()
        self.mlp_out_proj = nn.Linear(mlp_hidden_dim, dim, bias=proj_bias)

        self.ls = LayerScale(dim, init_values=init_values) if init_values is not None else nn.Identity()
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        B, N, C = x.shape

        # Combined MLP fc1 & qkv projections
        y = self.in_norm(x)
        if self.mlp_bias is not None:
            # Concat constant zero-bias for qkv w/ trainable mlp_bias.
            # Appears faster than adding to x_mlp separately
            y = F.linear(y, self.in_proj.weight, torch.cat((self.qkv_bias, self.mlp_bias)))
        else:
            y = self.in_proj(y)
        x_mlp, q, k, v = torch.split(y, self.in_split, dim=-1)

        # Dot product attention w/ qk norm
        q = self.q_norm(q.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2)
        k = self.k_norm(k.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2)
        v = v.view(B, N, self.num_heads, self.head_dim).transpose(1, 2)
        if self.fused_attn:
            x_attn = F.scaled_dot_product_attention(
                q, k, v,
                attn_mask=attn_mask,
                dropout_p=self.attn_drop.p if self.training else 0.,
            )
        else:
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)
            attn = maybe_add_mask(attn, attn_mask)
            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            x_attn = attn @ v
        x_attn = x_attn.transpose(1, 2).reshape(B, N, C)
        x_attn = self.attn_out_proj(x_attn)

        # MLP activation, dropout, fc2
        x_mlp = self.mlp_act(x_mlp)
        x_mlp = self.mlp_drop(x_mlp)
        x_mlp = self.mlp_out_proj(x_mlp)

        # Add residual w/ drop path & layer scale applied
        y = self.drop_path(self.ls(x_attn + x_mlp))
        x = x + y
        return x


class ParallelThingsBlock(nn.Module):
    """ Parallel ViT block (N parallel attention followed by N parallel MLP)

    Based on:
      `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795
    """
    def __init__(
            self,
            dim: int,
            num_heads: int,
            num_parallel: int = 2,
            mlp_ratio: float = 4.,
            qkv_bias: bool = False,
            qk_norm: bool = False,
            scale_attn_norm: bool = False,
            scale_mlp_norm: bool = False,
            proj_bias: bool = True,
            init_values: Optional[float] = None,
            proj_drop: float = 0.,
            attn_drop: float = 0.,
            drop_path: float = 0.,
            act_layer: Type[nn.Module] = nn.GELU,
            norm_layer: Type[nn.Module] = LayerNorm,
            mlp_layer: Type[nn.Module] = Mlp,
    ) -> None:
        super().__init__()
        self.num_parallel = num_parallel
        # num_parallel independent attention branches + num_parallel independent MLP branches
        self.attns = nn.ModuleList()
        self.ffns = nn.ModuleList()
        for _ in range(num_parallel):
            self.attns.append(nn.Sequential(OrderedDict([
                ('norm', norm_layer(dim)),
                ('attn',
Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, scale_norm=scale_attn_norm, proj_bias=proj_bias, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, )), ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), ('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity()) ]))) self.ffns.append(nn.Sequential(OrderedDict([ ('norm', norm_layer(dim)), ('mlp', mlp_layer( dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer if scale_mlp_norm else None, bias=proj_bias, drop=proj_drop, )), ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), ('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity()) ]))) def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None) -> torch.Tensor: if attn_mask is not None: attn_out = [] for attn in self.attns: x_attn = attn.norm(x) x_attn = attn.attn(x_attn, attn_mask=attn_mask) x_attn = attn.ls(x_attn) x_attn = attn.drop_path(x_attn) attn_out.append(x_attn) x = x + torch.stack(attn_out).sum(dim=0) else: x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0) x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0) return x def global_pool_nlc( x: torch.Tensor, pool_type: str = 'token', num_prefix_tokens: int = 1, reduce_include_prefix: bool = False, ): if not pool_type: return x if pool_type == 'token': x = x[:, 0] # class token else: x = x if reduce_include_prefix else x[:, num_prefix_tokens:] if pool_type == 'avg': x = x.mean(dim=1) elif pool_type == 'avgmax': x = 0.5 * (x.amax(dim=1) + x.mean(dim=1)) elif pool_type == 'max': x = x.amax(dim=1) else: assert not pool_type, f'Unknown pool type {pool_type}' return x class VisionTransformer(nn.Module): """ Vision Transformer A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - https://arxiv.org/abs/2010.11929 """ dynamic_img_size: Final[bool] def __init__( self, 
            img_size: Union[int, Tuple[int, int]] = 224,
            patch_size: Union[int, Tuple[int, int]] = 16,
            in_chans: int = 3,
            num_classes: int = 1000,
            global_pool: Literal['', 'avg', 'avgmax', 'max', 'token', 'map'] = 'token',
            embed_dim: int = 768,
            depth: int = 12,
            num_heads: int = 12,
            mlp_ratio: float = 4.,
            qkv_bias: bool = True,
            qk_norm: bool = False,
            scale_attn_norm: bool = False,
            scale_mlp_norm: bool = False,
            proj_bias: bool = True,
            init_values: Optional[float] = None,
            class_token: bool = True,
            pos_embed: str = 'learn',
            no_embed_class: bool = False,
            reg_tokens: int = 0,
            pre_norm: bool = False,
            final_norm: bool = True,
            fc_norm: Optional[bool] = None,
            pool_include_prefix: bool = False,
            dynamic_img_size: bool = False,
            dynamic_img_pad: bool = False,
            drop_rate: float = 0.,
            pos_drop_rate: float = 0.,
            patch_drop_rate: float = 0.,
            proj_drop_rate: float = 0.,
            attn_drop_rate: float = 0.,
            drop_path_rate: float = 0.,
            weight_init: Literal['skip', 'jax', 'jax_nlhb', 'moco', ''] = '',
            fix_init: bool = False,
            embed_layer: Callable = PatchEmbed,
            embed_norm_layer: Optional[LayerType] = None,
            norm_layer: Optional[LayerType] = None,
            act_layer: Optional[LayerType] = None,
            block_fn: Type[nn.Module] = Block,
            mlp_layer: Type[nn.Module] = Mlp,
    ) -> None:
        """
        Args:
            img_size: Input image size.
            patch_size: Patch size.
            in_chans: Number of image input channels.
            num_classes: Number of classes for classification head.
            global_pool: Type of global pooling for final sequence (default: 'token').
            embed_dim: Transformer embedding dimension.
            depth: Depth of transformer.
            num_heads: Number of attention heads.
            mlp_ratio: Ratio of mlp hidden dim to embedding dim.
            qkv_bias: Enable bias for qkv projections if True.
            qk_norm: Enable normalization of query and key in attention if True.
            scale_attn_norm: Enable extra norm on attention output if True (passed to block_fn).
            scale_mlp_norm: Enable norm inside block MLP if True (passed to block_fn).
            proj_bias: Enable bias on block projection layers if True.
            init_values: Layer-scale init values (layer-scale enabled if not None).
            class_token: Use class token.
            pos_embed: Position embedding type ('', 'none', or 'learn').
            no_embed_class: Don't include position embeddings for class (or reg) tokens.
            reg_tokens: Number of register tokens.
            pre_norm: Enable norm after embeddings, before transformer blocks (standard in CLIP ViT).
            final_norm: Enable norm after transformer blocks, before head (standard in most ViT).
            fc_norm: Move final norm after pool (instead of before), if None, enabled when global_pool == 'avg'.
            pool_include_prefix: Include prefix (class/reg) tokens when pooling if True.
            dynamic_img_size: Support variable input image sizes at runtime if True.
            dynamic_img_pad: Pad input to a multiple of patch size if True.
            drop_rate: Head dropout rate.
            pos_drop_rate: Position embedding dropout rate.
            patch_drop_rate: Patch (token) dropout rate.
            proj_drop_rate: Block projection dropout rate.
            attn_drop_rate: Attention dropout rate.
            drop_path_rate: Stochastic depth rate.
            weight_init: Weight initialization scheme.
            fix_init: Apply weight initialization fix (scaling w/ layer index).
            embed_layer: Patch embedding layer.
            embed_norm_layer: Normalization layer to use / override in patch embed module.
            norm_layer: Normalization layer.
            act_layer: MLP activation layer.
            block_fn: Transformer block layer.
            mlp_layer: MLP layer used within blocks.
        """
        super().__init__()
        assert global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map')
        assert class_token or global_pool != 'token'
        assert pos_embed in ('', 'none', 'learn')
        use_fc_norm = global_pool in ('avg', 'avgmax', 'max') if fc_norm is None else fc_norm
        norm_layer = get_norm_layer(norm_layer) or LayerNorm
        embed_norm_layer = get_norm_layer(embed_norm_layer)
        act_layer = get_act_layer(act_layer) or nn.GELU

        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = self.head_hidden_size = self.embed_dim = embed_dim  # for consistency with other models
        self.num_prefix_tokens = 1 if class_token else 0
        self.num_prefix_tokens += reg_tokens
        self.num_reg_tokens = reg_tokens
        self.has_class_token = class_token
        self.no_embed_class = no_embed_class
        self.pool_include_prefix = pool_include_prefix
        self.dynamic_img_size = dynamic_img_size
        self.grad_checkpointing = False

        embed_args = {}
        if dynamic_img_size:
            # flatten deferred until after pos embed
            embed_args.update(dict(strict_img_size=False, output_fmt='NHWC'))
        if embed_norm_layer is not None:
            embed_args['norm_layer'] = embed_norm_layer
        self.patch_embed = embed_layer(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            bias=not pre_norm,  # disable bias if pre-norm is used (e.g. CLIP)
            dynamic_img_pad=dynamic_img_pad,
            **embed_args,
        )
        num_patches = self.patch_embed.num_patches
        reduction = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
        self.reg_token = nn.Parameter(torch.zeros(1, reg_tokens, embed_dim)) if reg_tokens else None
        # pos embed covers prefix tokens only when they are embedded (no_embed_class=False)
        embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens
        if not pos_embed or pos_embed == 'none':
            self.pos_embed = None
        else:
            self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02)
        self.pos_drop = nn.Dropout(p=pos_drop_rate)
        if patch_drop_rate > 0:
            self.patch_drop = PatchDropout(
                patch_drop_rate,
                num_prefix_tokens=self.num_prefix_tokens,
            )
        else:
            self.patch_drop = nn.Identity()
        self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.Sequential(*[
            block_fn(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_norm=qk_norm,
                scale_attn_norm=scale_attn_norm,
                scale_mlp_norm=scale_mlp_norm,
                proj_bias=proj_bias,
                init_values=init_values,
                proj_drop=proj_drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                mlp_layer=mlp_layer,
            )
            for i in range(depth)])
        self.feature_info = [
            dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=reduction) for i in range(depth)]
        self.norm = norm_layer(embed_dim) if final_norm and not use_fc_norm else nn.Identity()

        # Classifier Head
        if global_pool == 'map':
            self.attn_pool = AttentionPoolLatent(
                self.embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                norm_layer=norm_layer,
                act_layer=act_layer,
            )
        else:
            self.attn_pool = None
        self.fc_norm = norm_layer(embed_dim) if final_norm and use_fc_norm else nn.Identity()
        self.head_drop = nn.Dropout(drop_rate)
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if weight_init != 'skip':
            self.init_weights(weight_init)
        if fix_init:
            self.fix_init_weight()

    def fix_init_weight(self) -> None:
        """Apply weight initialization fix (scaling w/ layer index)."""
        def rescale(param, _layer_id):
            # scale down deeper layers' output projections by sqrt(2 * depth_index)
            param.div_(math.sqrt(2.0 * _layer_id))

        for layer_id, layer in enumerate(self.blocks):
            rescale(layer.attn.proj.weight.data, layer_id + 1)
            rescale(layer.mlp.fc2.weight.data, layer_id + 1)

    def init_weights(self, mode: str = '') -> None:
        """Initialize model weights.

        Args:
            mode: Weight initialization mode ('jax', 'jax_nlhb', 'moco', or '').
        """
        assert mode in ('jax', 'jax_nlhb', 'moco', '')
        # negative-log-head-bias initializes head for uniform class probabilities
        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
        if self.pos_embed is not None:
            trunc_normal_(self.pos_embed, std=.02)
        if self.cls_token is not None:
            nn.init.normal_(self.cls_token, std=1e-6)
        if self.reg_token is not None:
            nn.init.normal_(self.reg_token, std=1e-6)
        named_apply(get_init_weights_vit(mode, head_bias), self)

    def _init_weights(self, m: nn.Module) -> None:
        """Initialize weights for a single module (compatibility method)."""
        # this fn left here for compat with downstream users
        init_weights_vit_timm(m)

    @torch.jit.ignore()
    def load_pretrained(self, checkpoint_path: str, prefix: str = '') -> None:
        """Load pretrained weights.

        Args:
            checkpoint_path: Path to checkpoint.
            prefix: Prefix for state dict keys.
        """
        _load_weights(self, checkpoint_path, prefix)

    @torch.jit.ignore
    def no_weight_decay(self) -> Set[str]:
        """Set of parameters that should not use weight decay."""
        return {'pos_embed', 'cls_token', 'dist_token'}

    @torch.jit.ignore
    def group_matcher(self, coarse: bool = False) -> Dict[str, Union[str, List]]:
        """Create regex patterns for parameter grouping.

        Args:
            coarse: Use coarse grouping.

        Returns:
            Dictionary mapping group names to regex patterns.
        """
        return dict(
            stem=r'^cls_token|pos_embed|patch_embed',  # stem and embed
            blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
        )

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable: bool = True) -> None:
        """Enable or disable gradient checkpointing.

        Args:
            enable: Whether to enable gradient checkpointing.
        """
        self.grad_checkpointing = enable
        if hasattr(self.patch_embed, 'set_grad_checkpointing'):
            self.patch_embed.set_grad_checkpointing(enable)

    @torch.jit.ignore
    def get_classifier(self) -> nn.Module:
        """Get the classifier head."""
        return self.head

    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None:
        """Reset the classifier head.

        Args:
            num_classes: Number of classes for new classifier.
            global_pool: Global pooling type.
        """
        self.num_classes = num_classes
        if global_pool is not None:
            assert global_pool in ('', 'avg', 'avgmax', 'max', 'token', 'map')
            if global_pool == 'map' and self.attn_pool is None:
                assert False, "Cannot currently add attention pooling in reset_classifier()."
            elif global_pool != 'map' and self.attn_pool is not None:
                self.attn_pool = None  # remove attention pooling
            self.global_pool = global_pool
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def set_input_size(
            self,
            img_size: Optional[Tuple[int, int]] = None,
            patch_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """Update the input image resolution and patch size.

        Args:
            img_size: New input resolution, if None current resolution is used.
            patch_size: New patch size, if None existing patch size is used.
""" prev_grid_size = self.patch_embed.grid_size self.patch_embed.set_input_size(img_size=img_size, patch_size=patch_size) if self.pos_embed is not None: num_prefix_tokens = 0 if self.no_embed_class else self.num_prefix_tokens num_new_tokens = self.patch_embed.num_patches + num_prefix_tokens if num_new_tokens != self.pos_embed.shape[1]: self.pos_embed = nn.Parameter(resample_abs_pos_embed( self.pos_embed, new_size=self.patch_embed.grid_size, old_size=prev_grid_size, num_prefix_tokens=num_prefix_tokens, verbose=True, )) def _pos_embed(self, x: torch.Tensor) -> torch.Tensor: """Apply positional embedding to input.""" if self.pos_embed is None: return x.view(x.shape[0], -1, x.shape[-1]) if self.dynamic_img_size: B, H, W, C = x.shape prev_grid_size = self.patch_embed.grid_size pos_embed = resample_abs_pos_embed( self.pos_embed, new_size=(H, W), old_size=prev_grid_size, num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens, ) x = x.view(B, -1, C) else: pos_embed = self.pos_embed to_cat = [] if self.cls_token is not None: to_cat.append(self.cls_token.expand(x.shape[0], -1, -1)) if self.reg_token is not None: to_cat.append(self.reg_token.expand(x.shape[0], -1, -1)) if self.no_embed_class: # deit-3, updated JAX (big vision) # position embedding does not overlap with class token, add then concat x = x + pos_embed if to_cat: x = torch.cat(to_cat + [x], dim=1) else: # original timm, JAX, and deit vit impl # pos_embed has entry for class token, concat then add if to_cat: x = torch.cat(to_cat + [x], dim=1) x = x + pos_embed return self.pos_drop(x) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, return_prefix_tokens: bool = False, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, output_dict: bool = False, attn_mask: Optional[torch.Tensor] = None, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]], Dict[str, Any]]: """ Forward features 
        that returns intermediates.

        Args:
            x: Input image tensor
            indices: Take last n blocks if int, all if None, select matching indices if sequence
            return_prefix_tokens: Return both prefix and spatial intermediate tokens
            norm: Apply norm layer to all intermediates
            stop_early: Stop iterating over blocks when last desired intermediate hit
            output_fmt: Shape of intermediate feature outputs
            intermediates_only: Only return intermediate features
            output_dict: Return outputs as a dictionary with 'image_features' and 'image_intermediates' keys
            attn_mask: Optional attention mask for masked attention (e.g., for NaFlex)

        Returns:
            A tuple with (final_features, intermediates), a list of intermediate features, or a dictionary containing
            'image_features' and 'image_intermediates' (and optionally 'image_intermediates_prefix')
        """
        assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
        reshape = output_fmt == 'NCHW'
        intermediates = []
        take_indices, max_index = feature_take_indices(len(self.blocks), indices)

        # forward pass
        B, _, height, width = x.shape
        x = self.patch_embed(x)
        x = self._pos_embed(x)
        x = self.patch_drop(x)
        x = self.norm_pre(x)

        if torch.jit.is_scripting() or not stop_early:  # can't slice blocks in torchscript
            blocks = self.blocks
        else:
            blocks = self.blocks[:max_index + 1]

        for i, blk in enumerate(blocks):
            if attn_mask is not None:
                x = blk(x, attn_mask=attn_mask)
            elif self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint(blk, x)
            else:
                x = blk(x)
            if i in take_indices:
                # normalize intermediates with final norm layer if enabled
                intermediates.append(self.norm(x) if norm else x)

        # process intermediates
        if self.num_prefix_tokens:
            # split prefix (e.g. class, distill) and spatial feature tokens
            prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates]
            intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates]
        else:
            prefix_tokens = None

        if reshape:
            # reshape to BCHW output format
            H, W = self.patch_embed.dynamic_feat_size((height, width))
            intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates]

        # For dictionary output, handle prefix tokens separately
        if output_dict:
            result_dict = {}
            # Intermediates are always included
            result_dict['image_intermediates'] = intermediates
            if prefix_tokens is not None and return_prefix_tokens:
                result_dict['image_intermediates_prefix'] = prefix_tokens

            # Only include features if not intermediates_only
            if not intermediates_only:
                x_final = self.norm(x)
                result_dict['image_features'] = x_final

            return result_dict

        # For non-dictionary output, maintain the original behavior
        if not torch.jit.is_scripting() and return_prefix_tokens and prefix_tokens is not None:
            # return_prefix not support in torchscript due to poor type handling
            intermediates = list(zip(intermediates, prefix_tokens))

        if intermediates_only:
            return intermediates

        x = self.norm(x)

        return x, intermediates

    def prune_intermediate_layers(
            self,
            indices: Union[int, List[int]] = 1,
            prune_norm: bool = False,
            prune_head: bool = True,
    ) -> List[int]:
        """Prune layers not required for specified intermediates.

        Args:
            indices: Indices of intermediate layers to keep.
            prune_norm: Whether to prune normalization layer.
            prune_head: Whether to prune the classifier head.

        Returns:
            List of indices that were kept.
""" take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_norm: self.norm = nn.Identity() if prune_head: self.fc_norm = nn.Identity() self.reset_classifier(0, '') return take_indices def get_intermediate_layers( self, x: torch.Tensor, n: Union[int, List[int], Tuple[int]] = 1, reshape: bool = False, return_prefix_tokens: bool = False, norm: bool = False, attn_mask: Optional[torch.Tensor] = None, ) -> List[torch.Tensor]: """Get intermediate layer outputs (DINO interface compatibility). NOTE: This API is for backwards compat, favour using forward_intermediates() directly. Args: x: Input tensor. n: Number or indices of layers. reshape: Reshape to NCHW format. return_prefix_tokens: Return prefix tokens. norm: Apply normalization. Returns: List of intermediate features. """ return self.forward_intermediates( x, n, return_prefix_tokens=return_prefix_tokens, norm=norm, output_fmt='NCHW' if reshape else 'NLC', intermediates_only=True, attn_mask=attn_mask, ) def forward_features(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None) -> torch.Tensor: """Forward pass through feature layers (embeddings, transformer blocks, post-transformer norm).""" x = self.patch_embed(x) x = self._pos_embed(x) x = self.patch_drop(x) x = self.norm_pre(x) if attn_mask is not None: # If mask provided, we need to apply blocks one by one for blk in self.blocks: x = blk(x, attn_mask=attn_mask) elif self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.norm(x) return x def pool(self, x: torch.Tensor, pool_type: Optional[str] = None) -> torch.Tensor: """Apply pooling to feature tokens. Args: x: Feature tensor. pool_type: Pooling type override. Returns: Pooled features. 
""" if self.attn_pool is not None: if not self.pool_include_prefix: x = x[:, self.num_prefix_tokens:] x = self.attn_pool(x) return x pool_type = self.global_pool if pool_type is None else pool_type x = global_pool_nlc( x, pool_type=pool_type, num_prefix_tokens=self.num_prefix_tokens, reduce_include_prefix=self.pool_include_prefix, ) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False) -> torch.Tensor: """Forward pass through classifier head. Args: x: Feature tensor. pre_logits: Return features before final classifier. Returns: Output tensor. """ x = self.pool(x) x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None) -> torch.Tensor: x = self.forward_features(x, attn_mask=attn_mask) x = self.forward_head(x) return x def init_weights_vit_timm(module: nn.Module, name: str = '') -> None: """ViT weight initialization, original timm impl (for reproducibility). Args: module: Module to initialize. name: Module name for context. """ if isinstance(module, nn.Linear): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.0) -> None: """ViT weight initialization, matching JAX (Flax) impl. Args: module: Module to initialize. name: Module name for context. head_bias: Bias value for head layer. 
""" if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def init_weights_vit_moco(module: nn.Module, name: str = '') -> None: """ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed. Args: module: Module to initialize. name: Module name for context. """ if isinstance(module, nn.Linear): if 'qkv' in name: # treat the weights of Q, K, V separately val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1])) nn.init.uniform_(module.weight, -val, val) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def get_init_weights_vit(mode: str = 'jax', head_bias: float = 0.0) -> Callable: if 'jax' in mode: return partial(init_weights_vit_jax, head_bias=head_bias) elif 'moco' in mode: return init_weights_vit_moco else: return init_weights_vit_timm def resize_pos_embed( posemb: torch.Tensor, posemb_new: torch.Tensor, num_prefix_tokens: int = 1, gs_new: Tuple[int, int] = (), interpolation: str = 'bicubic', antialias: bool = False, ) -> torch.Tensor: """ Rescale the grid of position embeddings when loading from state_dict. 
*DEPRECATED* This function is being deprecated in favour of using resample_abs_pos_embed """ ntok_new = posemb_new.shape[1] - num_prefix_tokens ntok_old = posemb.shape[1] - num_prefix_tokens gs_old = [int(math.sqrt(ntok_old))] * 2 if not len(gs_new): # backwards compatibility gs_new = [int(math.sqrt(ntok_new))] * 2 return resample_abs_pos_embed( posemb, gs_new, gs_old, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) @torch.no_grad() def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = '', load_bfloat16: bool = False) -> None: """ Load weights from .npz checkpoints for official Google Brain Flax implementation """ import numpy as np if load_bfloat16: import jax.numpy as jnp import ml_dtypes def _n2p(_w, t=True, idx=None): if idx is not None: _w = _w[idx] if load_bfloat16: _w = _w.view(ml_dtypes.bfloat16).astype(jnp.float32) _w = np.array(_w) if _w.ndim == 4 and _w.shape[0] == _w.shape[1] == _w.shape[2] == 1: _w = _w.flatten() if t: if _w.ndim == 4: _w = _w.transpose([3, 2, 0, 1]) elif _w.ndim == 3: _w = _w.transpose([2, 0, 1]) elif _w.ndim == 2: _w = _w.transpose([1, 0]) _w = torch.from_numpy(_w) return _w if load_bfloat16: w = jnp.load(checkpoint_path) else: w = np.load(checkpoint_path) interpolation = 'bilinear' antialias = False big_vision = False if not prefix: if 'opt/target/embedding/kernel' in w: prefix = 'opt/target/' elif 'params/embedding/kernel' in w: prefix = 'params/' big_vision = True elif 'params/img/embedding/kernel' in w: prefix = 'params/img/' big_vision = True if hasattr(model.patch_embed, 'backbone'): # hybrid backbone = model.patch_embed.backbone stem_only = not hasattr(backbone, 'stem') stem = backbone if stem_only else backbone.stem stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) if not 
stem_only: for i, stage in enumerate(backbone.stages): for j, block in enumerate(stage.blocks): bp = f'{prefix}block{i + 1}/unit{j + 1}/' for r in range(3): getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) if block.downsample is not None: block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) else: embed_conv_w = adapt_input_conv( model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) if embed_conv_w.shape[-2:] != model.patch_embed.proj.weight.shape[-2:]: embed_conv_w = resample_patch_embed( embed_conv_w, model.patch_embed.proj.weight.shape[-2:], interpolation=interpolation, antialias=antialias, verbose=True, ) model.patch_embed.proj.weight.copy_(embed_conv_w) model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) if model.cls_token is not None: model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) if big_vision: pos_embed_w = _n2p(w[f'{prefix}pos_embedding'], t=False) else: pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) if pos_embed_w.shape != model.pos_embed.shape: num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) pos_embed_w = resample_abs_pos_embed( # resize pos embedding when different size from pretrained weights pos_embed_w, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) model.pos_embed.copy_(pos_embed_w) model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) if 
(isinstance(model.head, nn.Linear) and f'{prefix}head/bias' in w and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]): model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) # NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) if model.attn_pool is not None: block_prefix = f'{prefix}MAPHead_0/' mha_prefix = block_prefix + f'MultiHeadDotProductAttention_0/' model.attn_pool.latent.copy_(_n2p(w[f'{block_prefix}probe'], t=False)) model.attn_pool.kv.weight.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('key', 'value')])) model.attn_pool.kv.bias.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('key', 'value')])) model.attn_pool.q.weight.copy_(_n2p(w[f'{mha_prefix}query/kernel'], t=False).flatten(1).T) model.attn_pool.q.bias.copy_(_n2p(w[f'{mha_prefix}query/bias'], t=False).reshape(-1)) model.attn_pool.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) model.attn_pool.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) model.attn_pool.norm.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) model.attn_pool.norm.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) for r in range(2): getattr(model.attn_pool.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/kernel'])) getattr(model.attn_pool.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_0/Dense_{r}/bias'])) mha_sub, b_sub, ln1_sub = (0, 0, 1) if big_vision else (1, 3, 2) for i, block in enumerate(model.blocks.children()): if f'{prefix}Transformer/encoderblock/LayerNorm_0/scale' in w: block_prefix = f'{prefix}Transformer/encoderblock/' idx = i 
        # NOTE(review): this chunk begins mid-way through _load_weights(); the `else`
        # below pairs with the stacked-encoderblock check that precedes this chunk.
        else:
            # Per-block key prefix for conventionally-saved (non big_vision) JAX checkpoints.
            block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
            idx = None
        mha_prefix = block_prefix + f'MultiHeadDotProductAttention_{mha_sub}/'
        # Copy pre-attention LayerNorm.
        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'], idx=idx))
        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'], idx=idx))
        # Fuse separate q/k/v projection matrices into the single qkv projection.
        block.attn.qkv.weight.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False, idx=idx).flatten(1).T for n in ('query', 'key', 'value')]))
        block.attn.qkv.bias.copy_(torch.cat([
            _n2p(w[f'{mha_prefix}{n}/bias'], t=False, idx=idx).reshape(-1) for n in ('query', 'key', 'value')]))
        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel'], idx=idx).flatten(1))
        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'], idx=idx))
        # Copy pre-MLP LayerNorm (index differs between big_vision and JAX layouts).
        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/scale'], idx=idx))
        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/bias'], idx=idx))
        # Two dense layers of the MLP block.
        for r in range(2):
            getattr(block.mlp, f'fc{r + 1}').weight.copy_(
                _n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/kernel'], idx=idx))
            getattr(block.mlp, f'fc{r + 1}').bias.copy_(
                _n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/bias'], idx=idx))


def _convert_openai_clip(
        state_dict: Dict[str, torch.Tensor],
        model: VisionTransformer,
        prefix: str = 'visual.',
) -> Dict[str, torch.Tensor]:
    """Remap an OpenAI CLIP visual-tower state dict to timm VisionTransformer keys.

    Keys not starting with `prefix` (i.e. text-tower weights) are dropped.
    The final `proj` matrix, class embedding, and position embedding get
    special-cased shape handling; everything else is a plain key rename.
    """
    out_dict = {}
    # Ordered substring swaps; order matters ('ln_pre'/'ln_post' before generic 'ln_').
    swaps = [
        ('conv1', 'patch_embed.proj'),
        ('positional_embedding', 'pos_embed'),
        ('transformer.resblocks.', 'blocks.'),
        ('ln_pre', 'norm_pre'),
        ('ln_post', 'norm'),
        ('ln_', 'norm'),
        ('in_proj_', 'qkv.'),
        ('out_proj', 'proj'),
        ('mlp.c_fc', 'mlp.fc1'),
        ('mlp.c_proj', 'mlp.fc2'),
    ]
    for k, v in state_dict.items():
        if not k.startswith(prefix):
            continue
        k = k.replace(prefix, '')
        for sp in swaps:
            k = k.replace(sp[0], sp[1])

        if k == 'proj':
            # CLIP output projection becomes the classifier head; it has no bias,
            # so synthesize a zero bias to match nn.Linear.
            k = 'head.weight'
            v = v.transpose(0, 1)
            out_dict['head.bias'] = torch.zeros(v.shape[0])
        elif k == 'class_embedding':
            # [D] -> [1, 1, D] to match timm cls_token shape.
            k = 'cls_token'
            v = v.unsqueeze(0).unsqueeze(1)
        elif k == 'pos_embed':
            # [N, D] -> [1, N, D]
            v = v.unsqueeze(0)
        out_dict[k] = v
    return out_dict


def
_convert_dinov2(
        state_dict: Dict[str, torch.Tensor],
        model: VisionTransformer,
) -> Dict[str, torch.Tensor]:
    """Remap a DINOv2 (optionally w/ registers) state dict to timm VisionTransformer keys.

    SwiGLU MLP weights (w12/w3) are renamed to timm's fc1/fc2 convention.
    """
    import re
    out_dict = {}
    state_dict.pop("mask_token", None)
    if 'register_tokens' in state_dict:
        # convert dinov2 w/ registers to no_embed_class timm model (neither cls or reg tokens overlap pos embed)
        out_dict['reg_token'] = state_dict.pop('register_tokens')
        # Fold the cls position embedding into the cls token itself.
        out_dict['cls_token'] = state_dict.pop('cls_token') + state_dict['pos_embed'][:, 0]
        out_dict['pos_embed'] = state_dict.pop('pos_embed')[:, 1:]
    for k, v in state_dict.items():
        if re.match(r"blocks\.(\d+)\.mlp\.w12\.(?:weight|bias)", k):
            # Fused gate+up projection of SwiGLU -> fc1.
            out_dict[k.replace("w12", "fc1")] = v
            continue
        elif re.match(r"blocks\.(\d+)\.mlp\.w3\.(?:weight|bias)", k):
            # Down projection of SwiGLU -> fc2.
            out_dict[k.replace("w3", "fc2")] = v
            continue
        out_dict[k] = v
    return out_dict


def _convert_aimv2(
        state_dict: Dict[str, torch.Tensor],
        model: VisionTransformer,
) -> Dict[str, torch.Tensor]:
    """Remap an Apple AIM-v2 state dict to timm VisionTransformer keys.

    Pure substring renames; SwiGLU fc1/fc3 map to timm's fc1_g/fc1_x split.
    """
    out_dict = {}
    for k, v in state_dict.items():
        k = k.replace('norm_1', 'norm1')
        k = k.replace('norm_2', 'norm2')
        k = k.replace('preprocessor.patchifier.', 'patch_embed.')
        k = k.replace('preprocessor.pos_embed', 'pos_embed')
        k = k.replace('trunk.', '')
        k = k.replace('post_trunk_norm.', 'norm.')
        k = k.replace('mlp.fc1', 'mlp.fc1_g')
        k = k.replace('mlp.fc3', 'mlp.fc1_x')
        out_dict[k] = v
    return out_dict


def _convert_beit3(state_dict: dict, model):
    """ Turn a BEiT-3 checkpoint into a standard VisionTransformer state-dict.
""" import re state_dict = state_dict.get("model", state_dict) # unwrap if needed # Prune unused for k in ("beit3.text_embed.weight", "beit3.vision_embed.mask_token"): state_dict.pop(k, None) # Key renaming rules rules = [ (r"beit3\.", ""), (r"vision_embed\.cls_token", "cls_token"), (r"vision_embed\.", "patch_embed."), (r"embed_positions\.", "pos_embed."), (r"encoder\.", ""), (r"layers\.", "blocks."), (r"ffn_layernorm\.", "norm."), (r"ffn\.", "mlp."), (r"self_attn_layer_norm\.", "norm1."), (r"self_attn\.", "attn."), (r"final_layer_norm\.", "norm2."), (r"inner_attn_ln", "norm"), (r"out_proj", "proj"), (r"\.A\.", "."), ] # First pass, rename keys tmp = {} for k, v in state_dict.items(): if ".B." in k: continue # use branch-A only for old, new in rules: k = re.sub(old, new, k) if k == "pos_embed.weight": # strip first two positions, [1, N+1, D] tmp["pos_embed"] = v[2:].unsqueeze(0) else: tmp[k] = v # Second pass, fuse q, k, v out, buf = {}, {} pat = re.compile(r"blocks\.(\d+)\.attn\.(q|k|v)_proj\.(weight|bias)$") for k, v in tmp.items(): m = pat.fullmatch(k) if not m: # anything not q/k/v -> copy through out[k] = v continue blk, which, kind = m.groups() # block idx, 'q'/'k'/'v', 'weight'/'bias' stash = buf.setdefault((blk, kind), {}) # Gather by block & param type stash[which] = v if len(stash) == 3: # Have q, k, v -> concatenate out[f"blocks.{blk}.attn.qkv.{kind}"] = torch.cat( [stash['q'], stash['k'], stash['v']], dim=0 ) return out def checkpoint_filter_fn( state_dict: Dict[str, torch.Tensor], model: VisionTransformer, adapt_layer_scale: bool = False, interpolation: str = 'bicubic', antialias: bool = True, ) -> Dict[str, torch.Tensor]: """ convert patch embedding weight from manual patchify + linear proj to conv""" import re out_dict = {} state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) prefix = '' if 'visual.class_embedding' in state_dict: state_dict = _convert_openai_clip(state_dict, model) elif 
'module.visual.class_embedding' in state_dict:
        state_dict = _convert_openai_clip(state_dict, model, prefix='module.visual.')
    elif "mask_token" in state_dict:
        state_dict = _convert_dinov2(state_dict, model)
    elif any('beit3.' in k for k in state_dict.keys()):
        # BEiT3 model - multimodal checkpoint with beit3.* prefix
        state_dict = _convert_beit3(state_dict, model)
    elif "encoder" in state_dict:
        # IJEPA, vit in an 'encoder' submodule
        state_dict = state_dict['encoder']
        prefix = 'module.'
    elif 'visual.trunk.pos_embed' in state_dict or 'visual.trunk.blocks.0.norm1.weight' in state_dict:
        # OpenCLIP model with timm vision encoder
        prefix = 'visual.trunk.'
        if 'visual.head.proj.weight' in state_dict and isinstance(model.head, nn.Linear):
            # remap final nn.Linear if it exists outside of the timm .trunk (ie in visual.head.proj)
            out_dict['head.weight'] = state_dict['visual.head.proj.weight']
            out_dict['head.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0])
    elif 'preprocessor.patchifier.proj.weight' in state_dict:
        state_dict = _convert_aimv2(state_dict, model)

    if prefix:
        # filter on & remove prefix string from keys
        state_dict = {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}

    for k, v in state_dict.items():
        if 'patch_embed.proj.weight' in k:
            O, I, H, W = model.patch_embed.proj.weight.shape
            if len(v.shape) < 4:
                # For old models that I trained prior to conv based patchification
                # NOTE(review): this shape unpack duplicates the one just above — redundant
                # but harmless; left as-is.
                O, I, H, W = model.patch_embed.proj.weight.shape
                v = v.reshape(O, -1, H, W)
            if v.shape[-1] != W or v.shape[-2] != H:
                # Resize patch embedding kernel to the model's patch size.
                v = resample_patch_embed(
                    v,
                    (H, W),
                    interpolation=interpolation,
                    antialias=antialias,
                    verbose=True,
                )
        elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
            # To resize pos embedding when using model at different size from pretrained weights
            num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1)
            v = resample_abs_pos_embed(
                v,
                new_size=model.patch_embed.grid_size,
num_prefix_tokens=num_prefix_tokens,
                interpolation=interpolation,
                antialias=antialias,
                verbose=True,
            )
        elif adapt_layer_scale and 'gamma_' in k:
            # remap layer-scale gamma into sub-module (deit3 models)
            k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k)
        elif 'pre_logits' in k:
            # NOTE representation layer removed as not used in latest 21k/1k pretrained weights
            continue
        out_dict[k] = v
    return out_dict


def _cfg(url: str = '', **kwargs) -> Dict[str, Any]:
    """Build a default pretrained-config dict; `kwargs` override the defaults.

    NOTE(review): references IMAGENET_INCEPTION_MEAN/STD, which must be imported
    at the top of this file — confirm against the full import block.
    """
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': None,
        'crop_pct': 0.9,
        'interpolation': 'bicubic',
        'fixed_input_size': True,
        'mean': IMAGENET_INCEPTION_MEAN,
        'std': IMAGENET_INCEPTION_STD,
        'first_conv': 'patch_embed.proj',
        'classifier': 'head',
        **kwargs,
    }


# Registry of pretrained weight configs, keyed as '<arch>.<pretrain tag>'.
default_cfgs = {
    # re-finetuned augreg 21k FT on in1k weights
    'vit_base_patch16_224.augreg2_in21k_ft_in1k': _cfg(
        hf_hub_id='timm/'),
    'vit_base_patch16_384.augreg2_in21k_ft_in1k': _cfg(),
    'vit_base_patch8_224.augreg2_in21k_ft_in1k': _cfg(
        hf_hub_id='timm/'),

    # How to train your ViT (augreg) weights, pretrained on 21k FT on in1k
    'vit_tiny_patch16_224.augreg_in21k_ft_in1k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz',
        hf_hub_id='timm/',
        custom_load=True),
    'vit_tiny_patch16_384.augreg_in21k_ft_in1k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
        hf_hub_id='timm/',
        custom_load=True, input_size=(3, 384, 384), crop_pct=1.0),
    'vit_small_patch32_224.augreg_in21k_ft_in1k': _cfg(
        url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz',
        hf_hub_id='timm/',
        custom_load=True),
    'vit_small_patch32_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_small_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch8_224.augreg_in21k_ft_in1k': _cfg( 
url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), # patch models (weights from official Google JAX impl) pretrained on in21k FT on in1k 'vit_base_patch16_224.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', hf_hub_id='timm/'), 'vit_base_patch16_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_patch32_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), # How to train your ViT (augreg) weights trained on in1k only 'vit_small_patch16_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch16_384.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, 
input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch32_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch32_384.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch16_384.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_patch14_224.untrained': _cfg(url=''), 'vit_huge_patch14_224.untrained': _cfg(url=''), 'vit_giant_patch14_224.untrained': _cfg(url=''), 'vit_gigantic_patch14_224.untrained': _cfg(url=''), # patch models, imagenet21k (weights from official Google JAX impl), classifier not valid 'vit_base_patch32_224.orig_in21k': _cfg( #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch32_224_in21k-8db57226.pth', hf_hub_id='timm/', num_classes=0), 'vit_base_patch16_224.orig_in21k': _cfg( #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_patch16_224_in21k-e5005f0a.pth', hf_hub_id='timm/', num_classes=0), 'vit_large_patch32_224.orig_in21k': _cfg( #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', hf_hub_id='timm/', num_classes=0), 
'vit_large_patch16_224.orig_in21k': _cfg( #url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch16_224_in21k-606da67d.pth', hf_hub_id='timm/', num_classes=0), 'vit_huge_patch14_224.orig_in21k': _cfg( hf_hub_id='timm/', num_classes=0), # How to train your ViT (augreg) weights, pretrained on in21k 'vit_tiny_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_small_patch32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_small_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch8_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_large_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), # SAM trained models (https://arxiv.org/abs/2106.01548) 'vit_base_patch32_224.sam_in1k': _cfg( 
url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz', custom_load=True, hf_hub_id='timm/'), 'vit_base_patch16_224.sam_in1k': _cfg( url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz', custom_load=True, hf_hub_id='timm/'), # DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only) 'vit_small_patch16_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_small_patch8_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch16_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch8_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), # DINOv2 pretrained - https://arxiv.org/abs/2304.07193 (no classifier head, for fine-tune/features only) 'vit_small_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_base_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_large_patch14_dinov2.lvd142m': _cfg( 
url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_giant_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), # DINOv2 pretrained w/ registers - https://arxiv.org/abs/2309.16588 (no classifier head, for fine-tune/features only) 'vit_small_patch14_reg4_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_base_patch14_reg4_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_large_patch14_reg4_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_giant_patch14_reg4_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_pretrain.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), # ViT ImageNet-21K-P pretraining by MILL 'vit_base_patch16_224_miil.in21k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth', hf_hub_id='timm/', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221), 'vit_base_patch16_224_miil.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth', hf_hub_id='timm/', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear'), # Custom timm variants 'vit_base_patch16_rpn_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth', hf_hub_id='timm/'), 'vit_medium_patch16_gap_240.sw_in12k': _cfg( hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=11821), 'vit_medium_patch16_gap_256.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_gap_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=0.95, crop_mode='squash'), 'vit_base_patch16_gap_224': _cfg(), # CLIP pretrained image tower and related fine-tuned weights 'vit_base_patch32_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch32_clip_384.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), 'vit_base_patch32_clip_448.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 448, 448)), 'vit_base_patch16_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), 'vit_base_patch16_clip_384.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 
'vit_large_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.openai_ft_in12k_in1k': _cfg( # hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k_in1k', # FIXME weight exists, need to push mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch32_clip_384.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), 'vit_base_patch16_clip_224.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), 'vit_base_patch16_clip_384.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_base_patch16_clip_384.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', 
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_huge_patch14_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_huge_patch14_clip_336.laion2b_ft_in1k': _cfg( hf_hub_id='', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_384.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_base_patch16_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_large_patch14_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=11821), 'vit_huge_patch14_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), 'vit_base_patch16_clip_224.openai_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_large_patch14_clip_224.openai_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), 
'vit_base_patch32_clip_224.laion2b': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.laion2b': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.laion2b': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.laion2b': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_giant_patch14_clip_224.laion2b': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_gigantic_patch14_clip_224.laion2b': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280), 'vit_base_patch32_clip_224.laion400m_e32': _cfg( hf_hub_id='timm/', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.laion400m_e32': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_base_patch16_plus_clip_240.laion400m_e32': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 240, 240), crop_pct=1.0, num_classes=640), 'vit_large_patch14_clip_224.laion400m_e32': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_base_patch32_clip_224.datacompxl': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_base_patch32_clip_256.datacompxl': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 256, 256), num_classes=512), 'vit_base_patch16_clip_224.datacompxl': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 
'vit_large_patch14_clip_224.datacompxl': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_base_patch16_clip_224.dfn2b': _cfg( hf_hub_id='timm/', license='apple-ascl', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.dfn2b_s39b': _cfg( hf_hub_id='timm/', license='apple-ascl', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_large_patch14_clip_224.dfn2b': _cfg( hf_hub_id='timm/', license='apple-ascl', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.dfn5b': _cfg( hf_hub_id='timm/', license='apple-ascl', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_huge_patch14_clip_378.dfn5b': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', notes=('natively QuickGELU, use quickgelu model variant for original results',), crop_pct=1.0, input_size=(3, 378, 378), num_classes=1024), 'vit_large_patch14_clip_224.metaclip2_worldwide': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.metaclip2_worldwide': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_huge_patch14_clip_378.metaclip2_worldwide': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 378, 378), crop_pct=1.0, crop_mode='squash', num_classes=1024), 'vit_gigantic_patch14_clip_224.metaclip2_worldwide': 
_cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280), 'vit_gigantic_patch14_clip_378.metaclip2_worldwide': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 378, 378), crop_pct=1.0, crop_mode='squash', num_classes=1280), 'vit_base_patch32_clip_224.metaclip_2pt5b': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_base_patch16_clip_224.metaclip_2pt5b': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.metaclip_2pt5b': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.metaclip_2pt5b': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_huge_patch14_clip_224.metaclip_altogether': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_gigantic_patch14_clip_224.metaclip_2pt5b': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280), 'vit_base_patch32_clip_224.metaclip_400m': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, 
crop_pct=1.0, num_classes=512), 'vit_base_patch16_clip_224.metaclip_400m': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.metaclip_400m': _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_base_patch32_clip_224.openai': _cfg( hf_hub_id='timm/', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.openai': _cfg( hf_hub_id='timm/', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_large_patch14_clip_224.openai': _cfg( hf_hub_id='timm/', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_large_patch14_clip_336.openai': _cfg( hf_hub_id='timm/', notes=('natively QuickGELU, use quickgelu model variant for original results',), mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), num_classes=768), # experimental (may be removed) 'vit_base_patch32_plus_256.untrained': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), 'vit_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95), 'vit_small_patch16_36x1_224.untrained': _cfg(url=''), 'vit_small_patch16_18x2_224.untrained': _cfg(url=''), 'vit_base_patch16_18x2_224.untrained': _cfg(url=''), # EVA fine-tuned weights from MAE style MIM - EVA-CLIP target pretrain # 
https://github.com/baaivision/EVA/blob/7ecf2c0a370d97967e86d047d7af9188f78d2df3/eva/README.md#eva-l-learning-better-mim-representations-from-eva-clip 'eva_large_patch14_196.in22k_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_21k_to_1k_ft_88p6.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 196, 196), crop_pct=1.0), 'eva_large_patch14_336.in22k_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_21k_to_1k_ft_89p2.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_large_patch14_196.in22k_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_1k_ft_88p0.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 196, 196), crop_pct=1.0), 'eva_large_patch14_336.in22k_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_1k_ft_88p65.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'flexivit_small.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_small.600ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_small.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.600ep_in1k': _cfg( 
url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.1000ep_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_1000ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_base.300ep_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_large.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_large.600ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_large.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.patch16_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/vit_b16_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_base.patch30_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/vit_b30_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'vit_base_patch16_xp_224.untrained': _cfg(url=''), 'vit_large_patch14_xp_224.untrained': _cfg(url=''), 'vit_huge_patch14_xp_224.untrained': _cfg(url=''), 'vit_base_patch16_224.mae': _cfg( 
url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_large_patch16_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_gap_224.in1k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_gap_224.in22k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.h.14-900e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch16_gap_448.in1k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', input_size=(3, 448, 448), crop_pct=1.0, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_giant_patch16_gap_224.in22k_ijepa': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch32_siglip_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_224.v2_webli': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_base_patch16_siglip_224.webli': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_base_patch16_siglip_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 
'vit_base_patch16_siglip_256.webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_256.webli_i18n': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_384.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_384.webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_512.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), num_classes=0), 'vit_base_patch16_siglip_512.webli': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), num_classes=0), 'vit_large_patch16_siglip_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_large_patch16_siglip_256.webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_large_patch16_siglip_384.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_large_patch16_siglip_384.webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_large_patch16_siglip_512.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), num_classes=0), 'vit_so400m_patch14_siglip_224.v2_webli': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_so400m_patch14_siglip_224.webli': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_so400m_patch14_siglip_378.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 378, 378), num_classes=0), 'vit_so400m_patch14_siglip_378.webli': _cfg( hf_hub_id='timm/', input_size=(3, 378, 378), num_classes=0), 'vit_so400m_patch14_siglip_384.webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_so400m_patch16_siglip_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_so400m_patch16_siglip_256.webli_i18n': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_so400m_patch16_siglip_384.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 
'vit_so400m_patch16_siglip_512.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), num_classes=0), 'vit_giantopt_patch16_siglip_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_giantopt_patch16_siglip_384.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch32_siglip_gap_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_gap_224.v2_webli': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_base_patch16_siglip_gap_224.webli': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_base_patch16_siglip_gap_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_gap_256.webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_gap_256.webli_i18n': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_base_patch16_siglip_gap_384.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_gap_384.webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_base_patch16_siglip_gap_512.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), num_classes=0), 'vit_base_patch16_siglip_gap_512.webli': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), num_classes=0), 'vit_large_patch16_siglip_gap_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_large_patch16_siglip_gap_256.webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_large_patch16_siglip_gap_384.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_large_patch16_siglip_gap_384.webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_large_patch16_siglip_gap_512.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), num_classes=0), 'vit_so400m_patch14_siglip_gap_224.v2_webli': _cfg( 
hf_hub_id='timm/', num_classes=0), 'vit_so400m_patch14_siglip_gap_224.webli': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_so400m_patch14_siglip_gap_224.pali_mix': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_so400m_patch14_siglip_gap_224.pali_pt': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_so400m_patch14_siglip_gap_224.pali2_3b_pt': _cfg( hf_hub_id='timm/', num_classes=0), 'vit_so400m_patch14_siglip_gap_224.pali2_10b_pt': _cfg( hf_hub_id='timm/', num_classes=0), # 'vit_so400m_patch14_siglip_gap_224.pali2_28b_pt': _cfg( # hf_hub_id='google/paligemma2-28b-pt-224-jax', # hf_hub_filename='pt_27b_224.npz', # custom_load='hf', # num_classes=0), 'vit_so400m_patch14_siglip_gap_378.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 378, 378), num_classes=0), 'vit_so400m_patch14_siglip_gap_378.webli': _cfg( hf_hub_id='timm/', input_size=(3, 378, 378), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_384.webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali_mix': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali_pt': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali_refcoco_seg': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali_ocrvqa': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali2_3b_pt': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali2_10b_pt': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), # 'vit_so400m_patch14_siglip_gap_448.pali2_28b_pt': _cfg( # hf_hub_id='google/paligemma2-28b-pt-448-jax', # hf_hub_filename='pt_27b_448.npz', # custom_load='hf', # input_size=(3, 448, 448), crop_pct=1.0, # 
num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali2_3b_docci': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_448.pali2_10b_docci': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_896.pali_pt': _cfg( hf_hub_id='timm/', input_size=(3, 896, 896), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_896.pali_refcoco_seg': _cfg( hf_hub_id='timm/', input_size=(3, 896, 896), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_896.pali_ocrvqa': _cfg( hf_hub_id='timm/', input_size=(3, 896, 896), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_896.pali2_3b_pt': _cfg( hf_hub_id='timm/', input_size=(3, 896, 896), crop_pct=1.0, num_classes=0), 'vit_so400m_patch14_siglip_gap_896.pali2_10b_pt': _cfg( hf_hub_id='timm/', input_size=(3, 896, 896), crop_pct=1.0, num_classes=0), # 'vit_so400m_patch14_siglip_gap_896.pali2_28b_pt': _cfg( # hf_hub_id='google/paligemma2-28b-pt-896-jax', # hf_hub_filename='pt_27b_896.npz', # custom_load='hf', # input_size=(3, 896, 896), crop_pct=1.0, # num_classes=0), 'vit_so400m_patch16_siglip_gap_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_so400m_patch16_siglip_gap_256.webli_i18n': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_so400m_patch16_siglip_gap_384.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_so400m_patch16_siglip_gap_512.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), num_classes=0), 'vit_giantopt_patch16_siglip_gap_256.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), num_classes=0), 'vit_giantopt_patch16_siglip_gap_384.v2_webli': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), num_classes=0), 'vit_so400m_patch14_siglip_378.webli_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 378, 378), crop_pct=1.0, crop_mode='squash', ), 
'vit_so400m_patch14_siglip_gap_378.webli_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 378, 378), crop_pct=1.0, crop_mode='squash', ), 'vit_xsmall_patch16_clip_224.tinyclip_yfcc15m': _cfg( hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_medium_patch32_clip_224.tinyclip_laion400m': _cfg( hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_medium_patch16_clip_224.tinyclip_yfcc15m': _cfg( hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_betwixt_patch32_clip_224.tinyclip_laion400m': _cfg( hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_wee_patch16_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_pwee_patch16_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_little_patch16_reg1_gap_256.sbb_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_little_patch16_reg1_gap_256.sbb_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95), 'vit_little_patch16_reg4_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_reg4_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_reg4_gap_256.sbb_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95), 'vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_mediumd_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg( 
hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_mediumd_patch16_reg4_gap_256.sbb2_e200_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95), 'vit_mediumd_patch16_reg4_gap_256.sbb_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95), 'vit_mediumd_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_betwixt_patch16_reg1_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_betwixt_patch16_reg4_gap_256.sbb_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_betwixt_patch16_reg4_gap_256.sbb_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_betwixt_patch16_reg4_gap_256.sbb2_e200_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95), 'vit_betwixt_patch16_reg4_gap_256.sbb_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95), 'vit_betwixt_patch16_reg4_gap_384.sbb2_e200_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_reg4_gap_256.untrained': _cfg( input_size=(3, 256, 256)), 'vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_so150m_patch16_reg4_gap_256.sbb_e250_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95), 'vit_so150m_patch16_reg4_gap_384.sbb_e250_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_so150m_patch16_reg4_map_256.untrained': _cfg( input_size=(3, 256, 256)), 'vit_so150m2_patch16_reg1_gap_256.sbb_e200_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=1.0), 
'vit_so150m2_patch16_reg1_gap_256.sbb_e200_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=1.0), 'vit_so150m2_patch16_reg1_gap_384.sbb_e200_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_so150m2_patch16_reg1_gap_448.sbb_e200_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash'), 'vit_intern300m_patch14_448.ogvl_dist': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 448, 448), crop_pct=1.0, num_classes=0, ), 'vit_intern300m_patch14_448.ogvl_2pt5': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 448, 448), crop_pct=1.0, num_classes=0, ), 'aimv2_large_patch14_224.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', crop_pct=1.0, num_classes=0), 'aimv2_large_patch14_224.apple_pt_dist': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', crop_pct=1.0, num_classes=0), 'aimv2_huge_patch14_224.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', crop_pct=1.0, num_classes=0), 'aimv2_1b_patch14_224.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', crop_pct=1.0, num_classes=0), 'aimv2_3b_patch14_224.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', crop_pct=1.0, num_classes=0), 'aimv2_large_patch14_336.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 336, 336), crop_pct=1.0, num_classes=0), 'aimv2_large_patch14_336.apple_pt_dist': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 336, 336), crop_pct=1.0, num_classes=0), 'aimv2_huge_patch14_336.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, 
std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 336, 336), crop_pct=1.0, num_classes=0), 'aimv2_1b_patch14_336.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 336, 336), crop_pct=1.0, num_classes=0), 'aimv2_3b_patch14_336.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 336, 336), crop_pct=1.0, num_classes=0), 'aimv2_large_patch14_448.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'aimv2_huge_patch14_448.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'aimv2_1b_patch14_448.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'aimv2_3b_patch14_448.apple_pt': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, license='apple-ascl', input_size=(3, 448, 448), crop_pct=1.0, num_classes=0), 'test_vit.r160_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 160, 160), crop_pct=0.95), 'test_vit2.r160_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 160, 160), crop_pct=0.95), 'test_vit3.r160_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 160, 160), crop_pct=0.95), 'test_vit4.r160_in1k': _cfg( input_size=(3, 160, 160), crop_pct=0.95), # BEiT3 models (remapped to VisionTransformer with scale_attn_norm=True, scale_mlp_norm=True) 'beit3_base_patch16_224.in22k_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0), 'beit3_base_patch16_224.indomain_in22k_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0), 'beit3_large_patch16_224.in22k_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, 
std=IMAGENET_DEFAULT_STD, crop_pct=1.0),
    'beit3_large_patch16_224.indomain_in22k_ft_in1k': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0),
    # untrained BEiT3 giant configs — url='' means no weights are published for these entries
    'beit3_giant_patch14_224.untrained': _cfg(
        url='',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0),
    'beit3_giant_patch14_336.untrained': _cfg(
        url='', input_size=(3, 336, 336),
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0),
    # BEiT3 pretrain-only checkpoints (no classifier head -> num_classes=0)
    'beit3_base_patch16_224.pt': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0,
        num_classes=0,
    ),
    'beit3_base_patch16_224.indomain_pt': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0,
        num_classes=0,
    ),
    'beit3_large_patch16_224.pt': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0,
        num_classes=0,
    ),
    'beit3_large_patch16_224.indomain_pt': _cfg(
        hf_hub_id='timm/',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=1.0,
        num_classes=0,
    ),
}

# Derive '*_clip_quickgelu_*' cfg variants for every cfg whose notes field flags
# that the weights were natively trained with QuickGELU activations.
_quick_gelu_cfgs = [n for n, c in default_cfgs.items() if c.get('notes', ()) and 'quickgelu' in c['notes'][0]]
for n in _quick_gelu_cfgs:
    # generate quickgelu default cfgs based on contents of notes field
    c = copy.deepcopy(default_cfgs[n])
    if c['hf_hub_id'] == 'timm/':
        c['hf_hub_id'] = 'timm/' + n  # need to use non-quickgelu model name for hub id
    default_cfgs[n.replace('_clip_', '_clip_quickgelu_')] = c
default_cfgs = generate_default_cfgs(default_cfgs)


# Global flag to use NaFlexVit instead of VisionTransformer
_USE_NAFLEX_DEFAULT = os.environ.get('TIMM_USE_NAFLEXVIT', 'false').lower() == 'true'


def _create_vision_transformer(
        variant: str,
        pretrained: bool = False,
        use_naflex: Optional[bool] = None,
        **kwargs,
) -> Union[VisionTransformer, 'NaFlexVit']:
    """Build a VisionTransformer (or NaFlexVit) for the given registry variant.

    Args:
        variant: Registered model variant name, used to look up the default cfg.
        pretrained: Load pretrained weights if True.
        use_naflex: Force NaFlexVit construction; when None, falls back to the
            TIMM_USE_NAFLEXVIT environment flag.
        **kwargs: Passed through to the model class / builder.

    Returns:
        Instantiated VisionTransformer, or NaFlexVit when the naflex path is taken.
    """
    # Check if we should use NaFlexVit instead
    if use_naflex is None:
        use_naflex = _USE_NAFLEX_DEFAULT
    if use_naflex:
        # Import here to avoid circular imports
        from .naflexvit import _create_naflexvit_from_classic
        return _create_naflexvit_from_classic(variant, pretrained, **kwargs)

    # default to features from the last 3 stages unless caller overrides
    out_indices = kwargs.pop('out_indices', 3)
    if 'flexi' in variant:
        # FIXME Google FlexiViT pretrained models have a strong preference for bilinear patch / embed
        # interpolation, other pretrained models resize better w/ anti-aliased bicubic interpolation.
        _filter_fn = partial(checkpoint_filter_fn, interpolation='bilinear', antialias=False)
    else:
        _filter_fn = checkpoint_filter_fn

    # FIXME attn pool (currently only in siglip) params removed if pool disabled, is there a better soln?
    strict = kwargs.pop('pretrained_strict', True)
    if 'siglip' in variant and kwargs.get('global_pool', None) != 'map':
        strict = False

    return build_model_with_cfg(
        VisionTransformer,
        variant,
        pretrained,
        pretrained_filter_fn=_filter_fn,
        pretrained_strict=strict,
        feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
        **kwargs,
    )


@register_model
def vit_tiny_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT-Tiny (Vit-Ti/16)
    """
    # caller kwargs take precedence over the preset args via dict(model_args, **kwargs)
    model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
    model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_tiny_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT-Tiny (Vit-Ti/16) @ 384x384.
""" model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/32) """ model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/32) at 384x384. """ model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/16) """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/16) """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch8_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/8) """ model_args = dict(patch_size=8, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/32) from original paper 
(https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer.
    """
    # caller kwargs take precedence over the preset args via dict(model_args, **kwargs)
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12)
    model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
    """
    model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12)
    model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
    """
    model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
    model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
    ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
""" model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch8_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch32_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. """ model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch32_384(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. 
""" model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_384(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) """ model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). 
""" model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16) model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giant_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 """ model_args = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16) model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_gigantic_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Gigantic (big-G) model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 """ model_args = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16) model = _create_vision_transformer( 'vit_gigantic_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_224_miil(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). 
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False) model = _create_vision_transformer( 'vit_base_patch16_224_miil', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_gap_240(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 240x240 """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) model = _create_vision_transformer( 'vit_medium_patch16_gap_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 256x256 """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) model = _create_vision_transformer( 'vit_medium_patch16_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 384x384 """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) model = _create_vision_transformer( 'vit_medium_patch16_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_betwixt_patch16_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Betwixt (ViT-b/16) w/o class token, w/ avg-pool @ 256x256 """ model_args = dict( patch_size=16, embed_dim=640, depth=12, num_heads=10, class_token=False, global_pool='avg', qkv_bias=False, 
init_values=1e-6, fc_norm=False) model = _create_vision_transformer( 'vit_medium_patch16_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) w/o class token, w/ avg-pool @ 224x224 """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer( 'vit_base_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) w/ no class token, avg pool """ model_args = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer( 'vit_huge_patch14_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch16_gap_448(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/16) w/ no class token, avg pool @ 448x448 """ model_args = dict( patch_size=16, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer( 'vit_huge_patch16_gap_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giant_patch16_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Giant (little-gg) model (ViT-g/16) w/ no class token, avg pool """ model_args = dict( patch_size=16, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=48/11, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer( 'vit_giant_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_xsmall_patch16_clip_224(pretrained: bool = False, **kwargs) -> 
VisionTransformer: # TinyCLIP 8M model_args = dict(embed_dim=256, depth=10, num_heads=4, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_xsmall_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: # TinyCLIP 40M model_args = dict( patch_size=32, embed_dim=512, depth=12, num_heads=8, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_medium_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: # TinyCLIP 39M model_args = dict(embed_dim=512, depth=12, num_heads=8, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_medium_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_betwixt_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: # TinyCLIP 61M model_args = dict( patch_size=32, embed_dim=640, depth=12, num_heads=10, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_betwixt_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/32 CLIP image tower @ 224x224 """ model_args = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_base_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_clip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/32 CLIP image tower @ 256x256 """ model_args = dict( 
patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_base_patch32_clip_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_clip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/32 CLIP image tower @ 384x384 """ model_args = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_base_patch32_clip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_clip_448(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/32 CLIP image tower @ 448x448 """ model_args = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_base_patch32_clip_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/16 CLIP image tower """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_clip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/16 CLIP image tower @ 384x384 """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_base_patch16_clip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_plus_clip_240(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16+) 
CLIP image tower @ 240x240 """ model_args = dict( patch_size=16, embed_dim=896, depth=12, num_heads=14, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_base_patch16_plus_clip_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) CLIP image tower """ model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_clip_336(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) CLIP image tower @ 336x336 """ model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) CLIP image tower. 
""" model_args = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_huge_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_clip_336(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) CLIP image tower @ 336x336 """ model_args = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_huge_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_clip_378(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) CLIP image tower @ 378x378 """ model_args = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5)) model = _create_vision_transformer( 'vit_huge_patch14_clip_378', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giant_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 Pretrained weights from CLIP image tower. """ model_args = dict( patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), ) model = _create_vision_transformer( 'vit_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_gigantic_patch14_clip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-bigG model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 Pretrained weights from CLIP image tower. 
""" model_args = dict( patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), ) model = _create_vision_transformer( 'vit_gigantic_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_gigantic_patch14_clip_378(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-bigG model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 Pretrained weights from CLIP image tower. """ model_args = dict( patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), ) model = _create_vision_transformer( 'vit_gigantic_patch14_clip_378', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/32 CLIP image tower @ 224x224 """ model_args = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), act_layer='quick_gelu' ) model = _create_vision_transformer( 'vit_base_patch32_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/16 CLIP image tower w/ QuickGELU act """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), act_layer='quick_gelu' ) model = _create_vision_transformer( 'vit_base_patch16_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) CLIP image tower w/ QuickGELU act """ model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, 
pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), act_layer='quick_gelu' ) model = _create_vision_transformer( 'vit_large_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_clip_quickgelu_336(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) CLIP image tower @ 336x336 w/ QuickGELU act """ model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), act_layer='quick_gelu' ) model = _create_vision_transformer( 'vit_large_patch14_clip_quickgelu_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) CLIP image tower w/ QuickGELU act. """ model_args = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), act_layer='quick_gelu' ) model = _create_vision_transformer( 'vit_huge_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_clip_quickgelu_378(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) CLIP image tower @ 378x378 w/ QuickGELU act """ model_args = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), act_layer='quick_gelu' ) model = _create_vision_transformer( 'vit_huge_patch14_clip_quickgelu_378', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_gigantic_patch14_clip_quickgelu_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-bigG model (ViT-G/14) w/ QuickGELU act """ model_args = dict( patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, pre_norm=True, norm_layer=partial(LayerNorm, eps=1e-5), 
act_layer='quick_gelu' ) model = _create_vision_transformer( 'vit_gigantic_patch14_clip_quickgelu_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model # Experimental models below @register_model def vit_base_patch32_plus_256(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/32+) """ model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, init_values=1e-5) model = _create_vision_transformer( 'vit_base_patch32_plus_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_plus_240(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16+) """ model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14, init_values=1e-5) model = _create_vision_transformer( 'vit_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_rpn_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) w/ residual post-norm """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, init_values=1e-5, class_token=False, block_fn=ResPostBlock, global_pool='avg') model = _create_vision_transformer( 'vit_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_36x1_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base w/ LayerScale + 36 x 1 (36 block serial) config. Experimental, may remove. Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow. 
""" model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=6, init_values=1e-5) model = _create_vision_transformer( 'vit_small_patch16_36x1_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_18x2_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Small w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove. Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow. """ model_args = dict( patch_size=16, embed_dim=384, depth=18, num_heads=6, init_values=1e-5, block_fn=ParallelThingsBlock) model = _create_vision_transformer( 'vit_small_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_18x2_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Base w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove. 
Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 """ model_args = dict( patch_size=16, embed_dim=768, depth=18, num_heads=12, init_values=1e-5, block_fn=ParallelThingsBlock) model = _create_vision_transformer( 'vit_base_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_large_patch14_196(pretrained: bool = False, **kwargs) -> VisionTransformer: """ EVA-large model https://arxiv.org/abs/2211.07636 /via MAE MIM pretrain""" model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg') model = _create_vision_transformer( 'eva_large_patch14_196', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_large_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer: """ EVA-large model https://arxiv.org/abs/2211.07636 via MAE MIM pretrain""" model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg') model = _create_vision_transformer('eva_large_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def flexivit_small(pretrained: bool = False, **kwargs) -> VisionTransformer: """ FlexiViT-Small """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True) model = _create_vision_transformer('flexivit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def flexivit_base(pretrained: bool = False, **kwargs) -> VisionTransformer: """ FlexiViT-Base """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True) model = _create_vision_transformer('flexivit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def flexivit_large(pretrained: bool = False, **kwargs) -> VisionTransformer: """ FlexiViT-Large """ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, 
no_embed_class=True) model = _create_vision_transformer('flexivit_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) w/ parallel blocks and qk norm enabled. """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, no_embed_class=True, norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, ) model = _create_vision_transformer( 'vit_base_patch16_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) w/ parallel blocks and qk norm enabled. """ model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, no_embed_class=True, norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, ) model = _create_vision_transformer( 'vit_large_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_xp_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) w/ parallel blocks and qk norm enabled. 
""" model_args = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, no_embed_class=True, norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, ) model = _create_vision_transformer( 'vit_huge_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-S/14 for DINOv2 """ model_args = dict(patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5) model = _create_vision_transformer( 'vit_small_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/14 for DINOv2 """ model_args = dict(patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5) model = _create_vision_transformer( 'vit_base_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-L/14 for DINOv2 """ model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5) model = _create_vision_transformer( 'vit_large_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giant_patch14_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-G/14 for DINOv2 """ # The hidden_features of SwiGLU is calculated by: # hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 # When embed_dim=1536, hidden_features=4096 # With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192 model_args = dict( patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, act_layer=nn.SiLU ) model = _create_vision_transformer( 'vit_giant_patch14_dinov2', pretrained=pretrained, 
**dict(model_args, **kwargs)) return model @register_model def vit_small_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-S/14 for DINOv2 w/ 4 registers """ model_args = dict( patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5, reg_tokens=4, no_embed_class=True, ) model = _create_vision_transformer( 'vit_small_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-B/14 for DINOv2 w/ 4 registers """ model_args = dict( patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5, reg_tokens=4, no_embed_class=True, ) model = _create_vision_transformer( 'vit_base_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-L/14 for DINOv2 w/ 4 registers """ model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5, reg_tokens=4, no_embed_class=True, ) model = _create_vision_transformer( 'vit_large_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giant_patch14_reg4_dinov2(pretrained: bool = False, **kwargs) -> VisionTransformer: """ ViT-G/14 for DINOv2 """ # The hidden_features of SwiGLU is calculated by: # hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 # When embed_dim=1536, hidden_features=4096 # With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192 model_args = dict( patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, act_layer=nn.SiLU, reg_tokens=4, no_embed_class=True, ) model = _create_vision_transformer( 'vit_giant_patch14_reg4_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def 
vit_base_patch32_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', act_layer='gelu_tanh', ) model = _create_vision_transformer( 'vit_base_patch32_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', ) model = _create_vision_transformer( 'vit_base_patch16_siglip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', ) model = _create_vision_transformer( 'vit_base_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', ) model = _create_vision_transformer( 'vit_base_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_siglip_512(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False, global_pool='map', ) model = _create_vision_transformer( 'vit_base_patch16_siglip_512', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map', ) model = 
_create_vision_transformer( 'vit_large_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map', ) model = _create_vision_transformer( 'vit_large_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_siglip_512(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False, global_pool='map', act_layer='gelu_tanh' ) model = _create_vision_transformer( 'vit_large_patch16_siglip_512', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_224(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', ) model = _create_vision_transformer( 'vit_so400m_patch14_siglip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_378(pretrained: bool = False, **kwargs) -> VisionTransformer: # this is a corrected variant of the 384 with a res properly divisible by patch size (no padding/truncation) model_args = dict( patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', ) model = _create_vision_transformer( 'vit_so400m_patch14_siglip_378', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch14_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', ) model = _create_vision_transformer( 
'vit_so400m_patch14_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', act_layer='gelu_tanh', ) model = _create_vision_transformer( 'vit_so400m_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch16_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', act_layer='gelu_tanh', ) model = _create_vision_transformer( 'vit_so400m_patch16_siglip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_so400m_patch16_siglip_512(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False, global_pool='map', act_layer='gelu_tanh', ) model = _create_vision_transformer( 'vit_so400m_patch16_siglip_512', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giantopt_patch16_siglip_256(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=1536, depth=40, num_heads=16, class_token=False, global_pool='map', act_layer='gelu_tanh', ) model = _create_vision_transformer( 'vit_giantopt_patch16_siglip_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giantopt_patch16_siglip_384(pretrained: bool = False, **kwargs) -> VisionTransformer: model_args = dict( patch_size=16, embed_dim=1536, depth=40, num_heads=16, class_token=False, global_pool='map', act_layer='gelu_tanh', ) model = _create_vision_transformer( 'vit_giantopt_patch16_siglip_384', 
pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch32_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=32, embed_dim=768, depth=12, num_heads=12, class_token=False,
        global_pool='avg', fc_norm=False, act_layer='gelu_tanh',
    )
    model = _create_vision_transformer(
        'vit_base_patch32_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_siglip_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_base_patch16_siglip_gap_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_base_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_siglip_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_base_patch16_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_siglip_gap_512(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_base_patch16_siglip_gap_512', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_large_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch16_siglip_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_large_patch16_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_large_patch16_siglip_gap_512(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP).

    Uses tanh-approx GELU, unlike the 256/384 GAP variants above.
    """
    model_args = dict(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, class_token=False,
        global_pool='avg', fc_norm=False, act_layer='gelu_tanh'
    )
    model = _create_vision_transformer(
        'vit_large_patch16_siglip_gap_512', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so400m_patch14_siglip_gap_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=14,
embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_so400m_patch14_siglip_gap_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so400m_patch14_siglip_gap_378(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_so400m_patch14_siglip_gap_378', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so400m_patch14_siglip_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_so400m_patch14_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so400m_patch14_siglip_gap_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=14, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_so400m_patch14_siglip_gap_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so400m_patch14_siglip_gap_896(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=14,
embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
        global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_so400m_patch14_siglip_gap_896', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so400m_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
        global_pool='avg', fc_norm=False, act_layer='gelu_tanh',
    )
    model = _create_vision_transformer(
        'vit_so400m_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so400m_patch16_siglip_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
        global_pool='avg', fc_norm=False, act_layer='gelu_tanh'
    )
    model = _create_vision_transformer(
        'vit_so400m_patch16_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so400m_patch16_siglip_gap_512(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP variant of ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=1152, depth=27, num_heads=16, mlp_ratio=3.7362, class_token=False,
        global_pool='avg', fc_norm=False, act_layer='gelu_tanh'
    )
    model = _create_vision_transformer(
        'vit_so400m_patch16_siglip_gap_512', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_giantopt_patch16_siglip_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP giant-opt ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=1536, depth=40, num_heads=16, class_token=False,
        global_pool='avg', fc_norm=False, act_layer='gelu_tanh'
    )
    model = _create_vision_transformer(
        'vit_giantopt_patch16_siglip_gap_256', pretrained=pretrained, **dict(model_args,
**kwargs))
    return model


@register_model
def vit_giantopt_patch16_siglip_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ A SigLIP giant-opt ViT with global average pooling (GAP) instead of attention pooling (MAP)."""
    model_args = dict(
        patch_size=16, embed_dim=1536, depth=40, num_heads=16, class_token=False,
        global_pool='avg', fc_norm=False, act_layer='gelu_tanh'
    )
    model = _create_vision_transformer(
        'vit_giantopt_patch16_siglip_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_wee_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Wee w/ 1 register token and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=256, depth=14, num_heads=4, init_values=1e-5, mlp_ratio=5,
        class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_wee_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_pwee_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-PWee (parallel scaling blocks) w/ 1 register token and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=256, depth=16, num_heads=4, init_values=1e-5, mlp_ratio=5,
        class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg', block_fn=ParallelScalingBlock,
    )
    model = _create_vision_transformer(
        'vit_pwee_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_little_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Little w/ 1 register token and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-5, mlp_ratio=5.6,
        class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_little_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_little_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Little w/ 4 register tokens and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=320, depth=14, num_heads=5, init_values=1e-5, mlp_ratio=5.6,
        class_token=False, no_embed_class=True, reg_tokens=4,
global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_little_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_medium_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Medium w/ 1 register token and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=512, depth=12, num_heads=8, init_values=1e-5,
        class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_medium_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_medium_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Medium w/ 4 register tokens and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=512, depth=12, num_heads=8, init_values=1e-5,
        class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_medium_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_mediumd_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Medium-Deep w/ 4 register tokens and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=512, depth=20, num_heads=8, init_values=1e-5,
        class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_mediumd_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_mediumd_patch16_reg4_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Medium-Deep w/ 4 register tokens and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=512, depth=20, num_heads=8, init_values=1e-5,
        class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_mediumd_patch16_reg4_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_betwixt_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Betwixt w/ 1 register token and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=640, depth=12, num_heads=10,
init_values=1e-5,
        class_token=False, no_embed_class=True, reg_tokens=1, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_betwixt_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_betwixt_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Betwixt w/ 4 register tokens and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=640, depth=12, num_heads=10, init_values=1e-5,
        class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_betwixt_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_betwixt_patch16_reg4_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SBB ViT-Betwixt w/ 4 register tokens and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=640, depth=12, num_heads=10, init_values=1e-5,
        class_token=False, no_embed_class=True, reg_tokens=4, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_betwixt_patch16_reg4_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_base_patch16_reg4_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT-Base w/ 4 register tokens and global average pooling."""
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, class_token=False,
        no_embed_class=True, global_pool='avg', reg_tokens=4,
    )
    model = _create_vision_transformer(
        'vit_base_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so150m_patch16_reg4_map_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SO150M (shape optimized, but diff than paper def, optimized for GPU) """
    model_args = dict(
        patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572,
        class_token=False, reg_tokens=4, global_pool='map',
    )
    model = _create_vision_transformer(
        'vit_so150m_patch16_reg4_map_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so150m_patch16_reg4_gap_256(pretrained: bool = False,
**kwargs) -> VisionTransformer:
    """ SO150M (shape optimized, but diff than paper def, optimized for GPU) """
    model_args = dict(
        patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572,
        class_token=False, reg_tokens=4, global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_so150m_patch16_reg4_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so150m_patch16_reg4_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SO150M (shape optimized, but diff than paper def, optimized for GPU) """
    model_args = dict(
        patch_size=16, embed_dim=896, depth=18, num_heads=14, mlp_ratio=2.572,
        class_token=False, reg_tokens=4, global_pool='avg', fc_norm=False,
    )
    model = _create_vision_transformer(
        'vit_so150m_patch16_reg4_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so150m2_patch16_reg1_gap_256(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SO150M v2 (shape optimized, but diff than paper def, optimized for GPU) """
    model_args = dict(
        patch_size=16, embed_dim=832, depth=21, num_heads=13, mlp_ratio=34/13, init_values=1e-5,
        qkv_bias=False, class_token=False, reg_tokens=1, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_so150m2_patch16_reg1_gap_256', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so150m2_patch16_reg1_gap_384(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SO150M v2 (shape optimized, but diff than paper def, optimized for GPU) """
    model_args = dict(
        patch_size=16, embed_dim=832, depth=21, num_heads=13, mlp_ratio=34/13, init_values=1e-5,
        qkv_bias=False, class_token=False, reg_tokens=1, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_so150m2_patch16_reg1_gap_384', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_so150m2_patch16_reg1_gap_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ SO150M v2 (shape optimized, but diff than paper def, optimized for GPU) """
    model_args = dict(
        patch_size=16, embed_dim=832, depth=21, num_heads=13, mlp_ratio=34/13, init_values=1e-5,
        qkv_bias=False, class_token=False, reg_tokens=1, global_pool='avg',
    )
    model = _create_vision_transformer(
        'vit_so150m2_patch16_reg1_gap_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def vit_intern300m_patch14_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ InternViT-300M."""
    model_args = dict(
        patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=0.1,
        final_norm=False, dynamic_img_size=True,
    )
    model = _create_vision_transformer(
        'vit_intern300m_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_large_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Large AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=1024, depth=24, num_heads=8, class_token=False, fc_norm=False,
        mlp_ratio=2.75, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_huge_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Huge AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=1536, depth=24, num_heads=12, class_token=False, fc_norm=False,
        mlp_ratio=2.6667, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_1b_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT 1B AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=2048, depth=24, num_heads=16, class_token=False, fc_norm=False,
        mlp_ratio=2.75, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_1b_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_3b_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT 3B AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=3072, depth=24, num_heads=24, class_token=False, fc_norm=False,
        mlp_ratio=2.6667, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_3b_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_large_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Large AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=1024, depth=24, num_heads=8, class_token=False, fc_norm=False,
        mlp_ratio=2.75, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_large_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_huge_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Huge AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=1536, depth=24, num_heads=12, class_token=False, fc_norm=False,
        mlp_ratio=2.6667, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5),
mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_huge_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_1b_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT 1B AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=2048, depth=24, num_heads=16, class_token=False, fc_norm=False,
        mlp_ratio=2.75, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_1b_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_3b_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT 3B AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=3072, depth=24, num_heads=24, class_token=False, fc_norm=False,
        mlp_ratio=2.6667, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_3b_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_large_patch14_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Large AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=1024, depth=24, num_heads=8, class_token=False, fc_norm=False,
        mlp_ratio=2.75, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_huge_patch14_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Huge AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=1536,
depth=24, num_heads=12, class_token=False, fc_norm=False,
        mlp_ratio=2.6667, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_huge_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_1b_patch14_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT 1B AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=2048, depth=24, num_heads=16, class_token=False, fc_norm=False,
        mlp_ratio=2.75, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_1b_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def aimv2_3b_patch14_448(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT 3B AIM-v2 model
    """
    model_args = dict(
        patch_size=14, embed_dim=3072, depth=24, num_heads=24, class_token=False, fc_norm=False,
        mlp_ratio=2.6667, global_pool='avg', qkv_bias=False, proj_bias=False, act_layer='silu',
        norm_layer=partial(RmsNorm, eps=1e-5), embed_norm_layer=partial(RmsNorm, eps=1e-5), mlp_layer=SwiGLU,
    )
    model = _create_vision_transformer(
        'aimv2_3b_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def test_vit(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Test
    """
    model_args = dict(patch_size=16, embed_dim=64, depth=6, num_heads=2, mlp_ratio=3, dynamic_img_size=True)
    model = _create_vision_transformer('test_vit', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def test_vit2(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Test
    """
    model_args = dict(
        patch_size=16, embed_dim=64, depth=8, num_heads=2, mlp_ratio=3,
class_token=False, reg_tokens=1, global_pool='avg', init_values=1e-5, dynamic_img_size=True)
    model = _create_vision_transformer('test_vit2', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def test_vit3(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Test
    """
    model_args = dict(
        patch_size=16, embed_dim=96, depth=9, num_heads=3, mlp_ratio=2,
        class_token=False, reg_tokens=1, global_pool='map', pool_include_prefix=True, init_values=1e-5)
    model = _create_vision_transformer('test_vit3', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def test_vit4(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ ViT Test
    """
    model_args = dict(
        patch_size=16,
        embed_dim=96,
        depth=9,
        num_heads=3,
        mlp_ratio=3,
        class_token=False,
        reg_tokens=1,
        global_pool='avg',
        init_values=1e-5,
        dynamic_img_size=True,
        norm_layer='rmsnorm',
    )
    model = _create_vision_transformer('test_vit4', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def beit3_base_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ BEiT3 Base model (ViT-Base size) with patch size 16x16.
    Remapped to VisionTransformer with scale_norm=True.
    """
    model_args = dict(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        scale_attn_norm=True, scale_mlp_norm=True, class_token=True, global_pool='avg',
        norm_layer=partial(LayerNorm, eps=1e-5)
    )
    model = _create_vision_transformer('beit3_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
    return model


@register_model
def beit3_large_patch16_224(pretrained: bool = False, **kwargs) -> VisionTransformer:
    """ BEiT3 Large model (ViT-Large size) with patch size 16x16.
    Remapped to VisionTransformer with scale_norm=True.
""" model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, scale_attn_norm=True, scale_mlp_norm=True, class_token=True, global_pool='avg', norm_layer=partial(LayerNorm, eps=1e-5), ) model = _create_vision_transformer('beit3_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def beit3_giant_patch14_224(pretrained: bool = False, **kwargs) -> VisionTransformer: """ BEiT3 Giant model with patch size 14x14. Remapped to VisionTransformer with scale_norm=True. """ model_args = dict( patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=4.3637, scale_attn_norm=True, scale_mlp_norm=True, class_token=True, global_pool='avg', norm_layer=partial(LayerNorm, eps=1e-5), ) model = _create_vision_transformer('beit3_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def beit3_giant_patch14_336(pretrained: bool = False, **kwargs) -> VisionTransformer: """ BEiT3 Giant model with patch size 14x14 and image size 336x336. Remapped to VisionTransformer with scale_norm=True. 
""" model_args = dict( img_size=336, patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=4.3637, scale_attn_norm=True, scale_mlp_norm=True, class_token=True, global_pool='avg', norm_layer=partial(LayerNorm, eps=1e-5), ) model = _create_vision_transformer('beit3_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'vit_tiny_patch16_224_in21k': 'vit_tiny_patch16_224.augreg_in21k', 'vit_small_patch32_224_in21k': 'vit_small_patch32_224.augreg_in21k', 'vit_small_patch16_224_in21k': 'vit_small_patch16_224.augreg_in21k', 'vit_base_patch32_224_in21k': 'vit_base_patch32_224.augreg_in21k', 'vit_base_patch16_224_in21k': 'vit_base_patch16_224.augreg_in21k', 'vit_base_patch8_224_in21k': 'vit_base_patch8_224.augreg_in21k', 'vit_large_patch32_224_in21k': 'vit_large_patch32_224.orig_in21k', 'vit_large_patch16_224_in21k': 'vit_large_patch16_224.augreg_in21k', 'vit_huge_patch14_224_in21k': 'vit_huge_patch14_224.orig_in21k', 'vit_base_patch32_224_sam': 'vit_base_patch32_224.sam', 'vit_base_patch16_224_sam': 'vit_base_patch16_224.sam', 'vit_small_patch16_224_dino': 'vit_small_patch16_224.dino', 'vit_small_patch8_224_dino': 'vit_small_patch8_224.dino', 'vit_base_patch16_224_dino': 'vit_base_patch16_224.dino', 'vit_base_patch8_224_dino': 'vit_base_patch8_224.dino', 'vit_base_patch16_224_miil_in21k': 'vit_base_patch16_224_miil.in21k', 'vit_base_patch32_224_clip_laion2b': 'vit_base_patch32_clip_224.laion2b', 'vit_large_patch14_224_clip_laion2b': 'vit_large_patch14_clip_224.laion2b', 'vit_huge_patch14_224_clip_laion2b': 'vit_huge_patch14_clip_224.laion2b', 'vit_giant_patch14_224_clip_laion2b': 'vit_giant_patch14_clip_224.laion2b', })
pytorch-image-models/timm/models/vision_transformer.py/0
{ "file_path": "pytorch-image-models/timm/models/vision_transformer.py", "repo_id": "pytorch-image-models", "token_count": 91969 }
264
""" Adafactor (Big Vision variant) for PyTorch Adapted from the implementation in big vision: https://github.com/google-research/big_vision Described in 'Scaling Vision Transformers': https://arxiv.org/abs/2106.04560 References for added functionality: Cautious Optimizers: https://arxiv.org/abs/2411.16085 Why Gradients Rapidly Increase Near the End of Training: https://arxiv.org/abs/2506.02285 Adaptation and PyTorch modifications by Ross Wightman """ from typing import List, Optional, Tuple, Union import torch from torch import Tensor from torch.optim import Optimizer from ._types import ParamsT def _get_scalar_dtype(): """Get the scalar dtype that the optimizer uses for state""" return torch.float64 def _factored_dims( shape: Tuple[int, ...], factored: bool, min_dim_size_to_factor: int ) -> Optional[tuple[int, int]]: """Whether to use a factored second moment estimator. This function returns a tuple with the two largest axes to reduce over. If no two dimensions have size >= min_dim_size_to_factor, return None. Args: shape: an input shape factored: whether to use factored second-moment estimator for > 2d vars. min_dim_size_to_factor: only factor accumulator if two array dimensions have at least this size. Returns: None or a tuple of ints """ if not factored or len(shape) < 2: return None sorted_dims = sorted(((x, i) for i, x in enumerate(shape))) if shape[sorted_dims[-2][1]] < min_dim_size_to_factor: return None return int(sorted_dims[-2][1]), int(sorted_dims[-1][1]) class AdafactorBigVision(Optimizer): """ PyTorch implementation of BigVision's Adafactor variant with both single and multi tensor implementations. 
Adapted from https://github.com/google-research/big_vision by Ross Wightman
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1.0,
            min_dim_size_to_factor: int = 16,
            decay_rate: float = 0.8,
            decay_offset: int = 0,
            beta2_cap: float = 0.999,
            momentum: Optional[float] = 0.9,
            momentum_dtype: Union[str, torch.dtype] = torch.bfloat16,
            eps: Optional[float] = None,
            weight_decay: float = 0.0,
            clipping_threshold: Optional[float] = None,
            unscaled_wd: bool = False,
            caution: bool = False,
            corrected_weight_decay: bool = False,
            *,
            foreach: Optional[bool] = False,
    ):
        """
        Args:
            params: Parameters to optimize.
            lr: Learning rate.
            min_dim_size_to_factor: Only use a factored 2nd-moment estimate if two dims are at least this size.
            decay_rate: Exponent for the step-dependent beta2 schedule (beta2_t = 1 - step**-decay_rate).
            decay_offset: Offset for the decay schedule.  # NOTE(review): stored in defaults; use not visible here — confirm downstream
            beta2_cap: Upper bound applied to the scheduled beta2.
            momentum: First-moment EMA factor, or None to disable the momentum buffer entirely.
            momentum_dtype: dtype for the momentum buffer ('float16'/'bfloat16'/'float32' or a torch.dtype).
            eps: Epsilon added to squared grads; None selects a dtype-dependent default at step time.
            weight_decay: Weight decay factor.
            clipping_threshold: Optional update clipping threshold.
            caution: Apply caution (see 'Cautious Optimizers' reference in module docstring).
            corrected_weight_decay: Apply lr-corrected weight decay (see module docstring reference).
            foreach: Use the multi-tensor code path when True.
        """
        if isinstance(momentum_dtype, str):
            if momentum_dtype == 'float16':
                momentum_dtype = torch.float16
            elif momentum_dtype == 'bfloat16':
                momentum_dtype = torch.bfloat16
            else:
                assert momentum_dtype == 'float32', f'{momentum_dtype} dtype not supported'
                momentum_dtype = torch.float32
            # FIXME try to check if momentum dtype is appropriate for device? Torch API not great for this.
        defaults = dict(
            lr=lr,
            min_dim_size_to_factor=min_dim_size_to_factor,
            decay_rate=decay_rate,
            decay_offset=decay_offset,
            beta2_cap=beta2_cap,
            momentum=momentum,
            momentum_dtype=momentum_dtype,
            eps=eps,
            weight_decay=weight_decay,
            clipping_threshold=clipping_threshold,
            unscaled_wd=unscaled_wd,
            caution=caution,
            corrected_weight_decay=corrected_weight_decay,
            foreach=foreach,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        """Restore optimizer state, backfilling options added after older checkpoints were saved."""
        super().__setstate__(state)
        for group in self.param_groups:
            # Older state dicts may predate these options; default them for compatibility.
            group.setdefault('caution', False)
            group.setdefault('corrected_weight_decay', False)
            group.setdefault('foreach', None)
            for p in group['params']:
                p_state = self.state.get(p, {})
                # Ensure 'step' is a tensor of the scalar state dtype (loaded state may hold a plain number).
                if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
                    p_state['step'] = torch.tensor(float(p_state['step']), dtype=_get_scalar_dtype())
                if 'exp_avg' in p_state and torch.is_tensor(p_state['exp_avg']):
                    # FIXME this is a bit of a hack, optimizer.load_state_dict appears to upcast
                    # the momentum to float32 (it's half precision in the state_dict), need to
                    # look into this further. Better to override _process_value_according_to_param_policy?
                    p_state['exp_avg'] = p_state['exp_avg'].to(dtype=self.defaults['momentum_dtype'])

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: Optional callable that re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            # Gather per-parameter state into parallel lists for the update functions.
            params_with_grad = []
            grads = []
            exp_avg_sq_rs = []
            exp_avg_sq_cs = []
            exp_avg_sqs = []
            state_steps = []
            exp_avgs = []  # For momentum

            for p in group['params']:
                if p.grad is None:
                    continue

                if p.grad.is_sparse:
                    raise RuntimeError("Sparse gradients not supported")

                params_with_grad.append(p)
                grads.append(p.grad)

                state = self.state[p]

                if len(state) == 0:
                    # Lazy state init on first step for this parameter.
                    # NOTE step on CPU, probably need some more though to make capturable
                    state['step'] = torch.tensor(0.0, dtype=_get_scalar_dtype())

                    shape = p.grad.shape
                    factored_dims = _factored_dims(
                        shape,
                        factored=True,
                        min_dim_size_to_factor=self.defaults['min_dim_size_to_factor']
                    )

                    if factored_dims is not None:
                        # Factored 2nd moment: keep one row and one column accumulator
                        # (size-1 along the reduced axis) instead of a full-size buffer.
                        dc, dr = factored_dims
                        row_shape = list(p.grad.shape)
                        row_shape[dr] = 1
                        col_shape = list(p.grad.shape)
                        col_shape[dc] = 1
                        state['exp_avg_sq_r'] = p.grad.new_zeros(row_shape)
                        state['exp_avg_sq_c'] = p.grad.new_zeros(col_shape)
                    else:
                        # Un-factored fallback: full-size second moment buffer.
                        state['exp_avg_sq'] = torch.zeros_like(p.grad, memory_format=torch.preserve_format)

                    if self.defaults['momentum'] is not None:
                        state['exp_avg'] = torch.zeros_like(p.grad, dtype=self.defaults['momentum_dtype'])

                state_steps.append(state['step'])
                exp_avg_sq_rs.append(state.get('exp_avg_sq_r', None))
                exp_avg_sq_cs.append(state.get('exp_avg_sq_c', None))
                exp_avg_sqs.append(state.get('exp_avg_sq', None))
                exp_avgs.append(state.get('exp_avg', None))

            if group['foreach']:
                func = _multi_tensor_adafactor
            else:
                func = _single_tensor_adafactor

            func(
                params=params_with_grad,
                grads=grads,
                exp_avg_sq_rs=exp_avg_sq_rs,
                exp_avg_sq_cs=exp_avg_sq_cs,
                exp_avg_sqs=exp_avg_sqs,
                exp_avgs=exp_avgs,
                state_steps=state_steps,
                beta2_decay=group['decay_rate'],
                beta2_cap=group['beta2_cap'],
min_dim_size_to_factor=group['min_dim_size_to_factor'], eps=group['eps'], lr=group['lr'], weight_decay=group['weight_decay'], momentum=group['momentum'], momentum_dtype=group['momentum_dtype'], clipping_threshold=group['clipping_threshold'], unscaled_wd=group['unscaled_wd'], caution=group['caution'], max_lr=self.defaults['lr'] if group['corrected_weight_decay'] else None, ) return loss def _single_tensor_adafactor( params: List[Tensor], grads: List[Tensor], exp_avg_sq_rs: List[Optional[Tensor]], exp_avg_sq_cs: List[Optional[Tensor]], exp_avg_sqs: List[Optional[Tensor]], exp_avgs: List[Optional[Tensor]], state_steps: List[Tensor], *, beta2_decay: float, beta2_cap: float, min_dim_size_to_factor: int, eps: float, lr: float, weight_decay: float, momentum: Optional[float], momentum_dtype: Union[str, torch.dtype], clipping_threshold: Optional[float], unscaled_wd: bool, caution: bool, max_lr: Optional[float], ): for i, param in enumerate(params): grad = grads[i] exp_avg_sq_r = exp_avg_sq_rs[i] exp_avg_sq_c = exp_avg_sq_cs[i] exp_avg_sq = exp_avg_sqs[i] exp_avg = exp_avgs[i] step_t = state_steps[i] if eps is None: # default eps for avoiding div by zero, diff from float type eps eps = 1e-7 if grad.dtype == torch.float16 else 1e-30 # Update step step_t += 1 beta2_t = min(beta2_cap, 1.0 - float(step_t) ** (-beta2_decay)) one_minus_beta2_t = 1 - beta2_t grad_sqr = torch.square(grad) + eps # NOTE application of eps (epsilon1) mirrors the optax/big vision/t5x approach if exp_avg_sq is None: # factorized second moment dc, dr = _factored_dims(grad.shape, True, min_dim_size_to_factor=min_dim_size_to_factor) exp_avg_sq_r.lerp_(grad_sqr.mean(dim=dr, keepdim=True), one_minus_beta2_t) exp_avg_sq_c.lerp_(grad_sqr.mean(dim=dc, keepdim=True), one_minus_beta2_t) reduce_dc = dc - 1 if dc > dr else dc row_col_mean = exp_avg_sq_r.mean(dim=reduce_dc, keepdim=True) row_factor = (exp_avg_sq_r / row_col_mean).rsqrt() col_factor = exp_avg_sq_c.rsqrt() update = grad * row_factor * col_factor else: 
# non-factorized second moment assert exp_avg_sq_r is None and exp_avg_sq_c is None exp_avg_sq.lerp_(grad_sqr, one_minus_beta2_t) update = grad * exp_avg_sq.rsqrt() # Clip by RMS value if clipping_threshold is not None: denom = (update.norm(2) / ((update.numel() ** 0.5) / clipping_threshold)).clamp_(max=1.0) update.div_(denom) # Apply momentum (in different dtype) if momentum is not None and exp_avg is not None: if momentum_dtype != grad.dtype: exp_avg.lerp_(update.to(momentum_dtype), 1 - momentum) # ema update = exp_avg.to(grad.dtype) else: exp_avg.lerp_(update, 1 - momentum) # ema update = exp_avg.clone() if caution: # apply caution as per 'Cautious Optimizers': https://arxiv.org/abs/2411.16085 mask = (update * grad > 0).to(grad.dtype) mask.div_(mask.mean().clamp_(min=1e-3)) update.mul_(mask) # Scale by learning rate update.mul_(lr) # Perform weight decay if weight_decay != 0: if unscaled_wd: # match big vision impl, 'fully decoupled' decay w/o LR scaling if max_lr is None: param.mul_(1. - weight_decay) else: # corrected weight decay: scale by lr / max_lr param.mul_(1. - (lr / max_lr) * weight_decay) else: # match typical pytorch behaviour for decoupled decay, eg adamw where wd is scaled by LR if max_lr is None: param.mul_(1. - lr * weight_decay) else: # corrected weight decay: scale by lr^2 / max_lr param.mul_(1. 
- (lr ** 2 / max_lr) * weight_decay) # Update parameters param.add_(update, alpha=-1.0) def _multi_tensor_adafactor( params: List[Tensor], grads: List[Tensor], exp_avg_sq_rs: List[Optional[Tensor]], exp_avg_sq_cs: List[Optional[Tensor]], exp_avg_sqs: List[Optional[Tensor]], exp_avgs: List[Optional[Tensor]], state_steps: List[Tensor], *, beta2_decay: float, beta2_cap: float, min_dim_size_to_factor: int, eps: float, lr: float, weight_decay: float, momentum: Optional[float], momentum_dtype: Union[str, torch.dtype], clipping_threshold: Optional[float], unscaled_wd: bool, caution: bool, max_lr: Optional[float], ): # FIXME TODO assert False, 'multi-tensor fn (foreach=True) not implemented yet'
pytorch-image-models/timm/optim/adafactor_bv.py/0
{ "file_path": "pytorch-image-models/timm/optim/adafactor_bv.py", "repo_id": "pytorch-image-models", "token_count": 6669 }
265
""" Nvidia NovoGrad Optimizer. Original impl by Nvidia from Jasper example: - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks` - https://arxiv.org/abs/1905.11286 """ import torch from torch.optim.optimizer import Optimizer import math class NvNovoGrad(Optimizer): """ Implements Novograd algorithm. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.95, 0.98)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) grad_averaging: gradient averaging amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) """ def __init__( self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8, weight_decay=0, grad_averaging=False, amsgrad=False, ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, grad_averaging=grad_averaging, amsgrad=amsgrad, ) super(NvNovoGrad, self).__init__(params, defaults) def __setstate__(self, state): super(NvNovoGrad, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. 
Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError('Sparse gradients are not supported.') amsgrad = group['amsgrad'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 norm = torch.sum(torch.pow(grad, 2)) if exp_avg_sq == 0: exp_avg_sq.copy_(norm) else: exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. of gradient denom = max_exp_avg_sq.sqrt().add_(group['eps']) else: denom = exp_avg_sq.sqrt().add_(group['eps']) grad.div_(denom) if group['weight_decay'] != 0: grad.add_(p, alpha=group['weight_decay']) if group['grad_averaging']: grad.mul_(1 - beta1) exp_avg.mul_(beta1).add_(grad) p.add_(exp_avg, alpha=-group['lr']) return loss
pytorch-image-models/timm/optim/nvnovograd.py/0
{ "file_path": "pytorch-image-models/timm/optim/nvnovograd.py", "repo_id": "pytorch-image-models", "token_count": 2509 }
266
from .agc import adaptive_clip_grad from .attention_extract import AttentionExtract from .checkpoint_saver import CheckpointSaver from .clip_grad import dispatch_clip_grad from .cuda import ApexScaler, NativeScaler from .decay_batch import decay_batch_step, check_batch_size_retry from .distributed import distribute_bn, reduce_tensor, init_distributed_device,\ world_info_from_env, is_distributed_env, is_primary from .jit import set_jit_legacy, set_jit_fuser from .log import setup_default_logging, FormatterNoInfo from .metrics import AverageMeter, accuracy from .misc import natural_key, add_bool_arg, ParseKwargs from .model import unwrap_model, get_state_dict, freeze, unfreeze, reparameterize_model from .model_ema import ModelEma, ModelEmaV2, ModelEmaV3 from .random import random_seed from .summary import update_summary, get_outdir
pytorch-image-models/timm/utils/__init__.py/0
{ "file_path": "pytorch-image-models/timm/utils/__init__.py", "repo_id": "pytorch-image-models", "token_count": 264 }
267
""" Summary utilities Hacked together by / Copyright 2020 Ross Wightman """ import csv import os from collections import OrderedDict try: import wandb except ImportError: pass def get_outdir(path, *paths, inc=False): outdir = os.path.join(path, *paths) if not os.path.exists(outdir): os.makedirs(outdir) elif inc: count = 1 outdir_inc = outdir + '-' + str(count) while os.path.exists(outdir_inc): count = count + 1 outdir_inc = outdir + '-' + str(count) assert count < 100 outdir = outdir_inc os.makedirs(outdir) return outdir def update_summary( epoch, train_metrics, eval_metrics, filename, lr=None, write_header=False, log_wandb=False, ): rowd = OrderedDict(epoch=epoch) rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) if eval_metrics: rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) if lr is not None: rowd['lr'] = lr if log_wandb: wandb.log(rowd) with open(filename, mode='a') as cf: dw = csv.DictWriter(cf, fieldnames=rowd.keys()) if write_header: # first iteration (epoch == 1 can't be used) dw.writeheader() dw.writerow(rowd)
pytorch-image-models/timm/utils/summary.py/0
{ "file_path": "pytorch-image-models/timm/utils/summary.py", "repo_id": "pytorch-image-models", "token_count": 633 }
268
<!--- Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <p align="center"> <!-- Uncomment when CircleCI is set up <a href="https://circleci.com/gh/huggingface/accelerate"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master"></a> --> <a href="https://github.com/huggingface/smolagents/blob/main/LICENSE"><img alt="License" src="https://img.shields.io/github/license/huggingface/smolagents.svg?color=blue"></a> <a href="https://huggingface.co/docs/smolagents"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/smolagents/index.html.svg?down_color=red&down_message=offline&up_message=online"></a> <a href="https://github.com/huggingface/smolagents/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/smolagents.svg"></a> <a href="https://github.com/huggingface/smolagents/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a> </p> <h3 align="center"> <div style="display:flex;flex-direction:row;"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/smolagents.png" alt="Hugging Face mascot as James Bond" width=400px> <p>Agents that think in code!</p> </div> </h3> `smolagents` is a library that enables you to run powerful agents in a few lines of code. 
It offers:

✨ **Simplicity**: the logic for agents fits in ~1,000 lines of code (see [agents.py](https://github.com/huggingface/smolagents/blob/main/src/smolagents/agents.py)). We kept abstractions to their minimal shape above raw code!

🧑‍💻 **First-class support for Code Agents**. Our [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent) writes its actions in code (as opposed to "agents being used to write code"). To make it secure, we support executing in sandboxed environments via [E2B](https://e2b.dev/), Docker, or Pyodide+Deno WebAssembly sandbox.

🤗 **Hub integrations**: you can [share/pull tools or agents to/from the Hub](https://huggingface.co/docs/smolagents/reference/tools#smolagents.Tool.from_hub) for instant sharing of the most efficient agents!

🌐 **Model-agnostic**: smolagents supports any LLM. It can be a local `transformers` or `ollama` model, one of [many providers on the Hub](https://huggingface.co/blog/inference-providers), or any model from OpenAI, Anthropic and many others via our [LiteLLM](https://www.litellm.ai/) integration.

👁️ **Modality-agnostic**: Agents support text, vision, video, even audio inputs! Cf [this tutorial](https://huggingface.co/docs/smolagents/examples/web_browser) for vision.

🛠️ **Tool-agnostic**: you can use tools from any [MCP server](https://huggingface.co/docs/smolagents/reference/tools#smolagents.ToolCollection.from_mcp), from [LangChain](https://huggingface.co/docs/smolagents/reference/tools#smolagents.Tool.from_langchain), you can even use a [Hub Space](https://huggingface.co/docs/smolagents/reference/tools#smolagents.Tool.from_space) as a tool.

Full documentation can be found [here](https://huggingface.co/docs/smolagents/index).

> [!NOTE]
> Check out our [launch blog post](https://huggingface.co/blog/smolagents) to learn more about `smolagents`!
## Quick demo First install the package with a default set of tools: ```bash pip install "smolagents[toolkit]" ``` Then define your agent, give it the tools it needs and run it! ```py from smolagents import CodeAgent, WebSearchTool, InferenceClientModel model = InferenceClientModel() agent = CodeAgent(tools=[WebSearchTool()], model=model, stream_outputs=True) agent.run("How many seconds would it take for a leopard at full speed to run through Pont des Arts?") ``` https://github.com/user-attachments/assets/84b149b4-246c-40c9-a48d-ba013b08e600 You can even share your agent to the Hub, as a Space repository: ```py agent.push_to_hub("m-ric/my_agent") # agent.from_hub("m-ric/my_agent") to load an agent from Hub ``` Our library is LLM-agnostic: you could switch the example above to any inference provider. <details> <summary> <b>InferenceClientModel, gateway for all <a href="https://huggingface.co/docs/inference-providers/index">inference providers</a> supported on HF</b></summary> ```py from smolagents import InferenceClientModel model = InferenceClientModel( model_id="deepseek-ai/DeepSeek-R1", provider="together", ) ``` </details> <details> <summary> <b>LiteLLM to access 100+ LLMs</b></summary> ```py from smolagents import LiteLLMModel model = LiteLLMModel( model_id="anthropic/claude-3-5-sonnet-latest", temperature=0.2, api_key=os.environ["ANTHROPIC_API_KEY"] ) ``` </details> <details> <summary> <b>OpenAI-compatible servers: Together AI</b></summary> ```py import os from smolagents import OpenAIServerModel model = OpenAIServerModel( model_id="deepseek-ai/DeepSeek-R1", api_base="https://api.together.xyz/v1/", # Leave this blank to query OpenAI servers. api_key=os.environ["TOGETHER_API_KEY"], # Switch to the API key for the server you're targeting. 
) ``` </details> <details> <summary> <b>OpenAI-compatible servers: OpenRouter</b></summary> ```py import os from smolagents import OpenAIServerModel model = OpenAIServerModel( model_id="openai/gpt-4o", api_base="https://openrouter.ai/api/v1", # Leave this blank to query OpenAI servers. api_key=os.environ["OPENROUTER_API_KEY"], # Switch to the API key for the server you're targeting. ) ``` </details> <details> <summary> <b>Local `transformers` model</b></summary> ```py from smolagents import TransformersModel model = TransformersModel( model_id="Qwen/Qwen2.5-Coder-32B-Instruct", max_new_tokens=4096, device_map="auto" ) ``` </details> <details> <summary> <b>Azure models</b></summary> ```py import os from smolagents import AzureOpenAIServerModel model = AzureOpenAIServerModel( model_id = os.environ.get("AZURE_OPENAI_MODEL"), azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"), api_key=os.environ.get("AZURE_OPENAI_API_KEY"), api_version=os.environ.get("OPENAI_API_VERSION") ) ``` </details> <details> <summary> <b>Amazon Bedrock models</b></summary> ```py import os from smolagents import AmazonBedrockServerModel model = AmazonBedrockServerModel( model_id = os.environ.get("AMAZON_BEDROCK_MODEL_ID") ) ``` </details> ## CLI You can run agents from CLI using two commands: `smolagent` and `webagent`. `smolagent` is a generalist command to run a multi-step `CodeAgent` that can be equipped with various tools. ```bash smolagent "Plan a trip to Tokyo, Kyoto and Osaka between Mar 28 and Apr 7." --model-type "InferenceClientModel" --model-id "Qwen/Qwen2.5-Coder-32B-Instruct" --imports "pandas numpy" --tools "web_search" ``` Meanwhile `webagent` is a specific web-browsing agent using [helium](https://github.com/mherrmann/helium) (read more [here](https://github.com/huggingface/smolagents/blob/main/src/smolagents/vision_web_browser.py)). For instance: ```bash webagent "go to xyz.com/men, get to sale section, click the first clothing item you see. 
Get the product details, and the price, return them. note that I'm shopping from France" --model-type "LiteLLMModel" --model-id "gpt-4o" ``` ## How do Code agents work? Our [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent) works mostly like classical ReAct agents - the exception being that the LLM engine writes its actions as Python code snippets. ```mermaid flowchart TB Task[User Task] Memory[agent.memory] Generate[Generate from agent.model] Execute[Execute Code action - Tool calls are written as functions] Answer[Return the argument given to 'final_answer'] Task -->|Add task to agent.memory| Memory subgraph ReAct[ReAct loop] Memory -->|Memory as chat messages| Generate Generate -->|Parse output to extract code action| Execute Execute -->|No call to 'final_answer' tool => Store execution logs in memory and keep running| Memory end Execute -->|Call to 'final_answer' tool| Answer %% Styling classDef default fill:#d4b702,stroke:#8b7701,color:#ffffff classDef io fill:#4a5568,stroke:#2d3748,color:#ffffff class Task,Answer io ``` Actions are now Python code snippets. Hence, tool calls will be performed as Python function calls. For instance, here is how the agent can perform web search over several websites in one single action: ```py requests_to_search = ["gulf of mexico america", "greenland denmark", "tariffs"] for request in requests_to_search: print(f"Here are the search results for {request}:", web_search(request)) ``` Writing actions as code snippets is demonstrated to work better than the current industry practice of letting the LLM output a dictionary of the tools it wants to call: [uses 30% fewer steps](https://huggingface.co/papers/2402.01030) (thus 30% fewer LLM calls) and [reaches higher performance on difficult benchmarks](https://huggingface.co/papers/2411.01747). Head to [our high-level intro to agents](https://huggingface.co/docs/smolagents/conceptual_guides/intro_agents) to learn more on that. 
Especially, since code execution can be a security concern (arbitrary code execution!), we provide options at runtime: - a secure python interpreter to run code more safely in your environment (more secure than raw code execution but still risky) - a sandboxed environment using [E2B](https://e2b.dev/) or Docker (removes the risk to your own system). Alongside [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent), we also provide the standard [`ToolCallingAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.ToolCallingAgent) which writes actions as JSON/text blobs. You can pick whichever style best suits your use case. ## How smol is this library? We strived to keep abstractions to a strict minimum: the main code in `agents.py` has <1,000 lines of code. Still, we implement several types of agents: `CodeAgent` writes its actions as Python code snippets, and the more classic `ToolCallingAgent` leverages built-in tool calling methods. We also have multi-agent hierarchies, import from tool collections, remote code execution, vision models... By the way, why use a framework at all? Well, because a big part of this stuff is non-trivial. For instance, the code agent has to keep a consistent format for code throughout its system prompt, its parser, the execution. So our framework handles this complexity for you. But of course we still encourage you to hack into the source code and use only the bits that you need, to the exclusion of everything else! ## How strong are open models for agentic workflows? We've created [`CodeAgent`](https://huggingface.co/docs/smolagents/reference/agents#smolagents.CodeAgent) instances with some leading models, and compared them on [this benchmark](https://huggingface.co/datasets/m-ric/agents_medium_benchmark_2) that gathers questions from a few different benchmarks to propose a varied blend of challenges. 
[Find the benchmarking code here](https://github.com/huggingface/smolagents/blob/main/examples/smolagents_benchmark/run.py) for more detail on the agentic setup used, and see a comparison of using LLM code agents compared to vanilla (spoiler: code agents work better).

<p align="center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/benchmark_code_agents.jpeg" alt="benchmark of different models on agentic workflows. Open model DeepSeek-R1 beats closed-source models." width=60% max-width=500px>
</p>

This comparison shows that open-source models can now take on the best closed models!

## Security

Security is a critical consideration when working with code-executing agents. Our library provides:

- Sandboxed execution options using [E2B](https://e2b.dev/), Docker, or Pyodide+Deno WebAssembly sandbox
- Best practices for running agent code securely

For security policies, vulnerability reporting, and more information on secure agent execution, please see our [Security Policy](SECURITY.md).

## Contribute

Everyone is welcome to contribute, get started with our [contribution guide](https://github.com/huggingface/smolagents/blob/main/CONTRIBUTING.md).

## Cite smolagents

If you use `smolagents` in your publication, please cite it by using the following BibTeX entry.

```bibtex
@Misc{smolagents,
  title =        {`smolagents`: a smol library to build great agentic systems.},
  author =       {Aymeric Roucher and Albert Villanova del Moral and Thomas Wolf and Leandro von Werra and Erik Kaunismäki},
  howpublished = {\url{https://github.com/huggingface/smolagents}},
  year =         {2025}
}
```
smolagents/README.md/0
{ "file_path": "smolagents/README.md", "repo_id": "smolagents", "token_count": 4304 }
269
# Installation Options The `smolagents` library can be installed using pip. Here are the different installation methods and options available. ## Prerequisites - Python 3.10 or newer - Python package manager: [`pip`](https://pip.pypa.io/en/stable/) or [`uv`](https://docs.astral.sh/uv/) ## Virtual Environment It's strongly recommended to install `smolagents` within a Python virtual environment. Virtual environments isolate your project dependencies from other Python projects and your system Python installation, preventing version conflicts and making package management more reliable. <hfoptions id="virtual-environment"> <hfoption id="venv"> Using [`venv`](https://docs.python.org/3/library/venv.html): ```bash python -m venv .venv source .venv/bin/activate ``` </hfoption> <hfoption id="uv"> Using [`uv`](https://docs.astral.sh/uv/): ```bash uv venv .venv source .venv/bin/activate ``` </hfoption> </hfoptions> ## Basic Installation Install `smolagents` core library with: <hfoptions id="installation"> <hfoption id="pip"> ```bash pip install smolagents ``` </hfoption> <hfoption id="uv"> ```bash uv pip install smolagents ``` </hfoption> </hfoptions> ## Installation with Extras `smolagents` provides several optional dependencies (extras) that can be installed based on your needs. You can install these extras using the following syntax: <hfoptions id="installation"> <hfoption id="pip"> ```bash pip install "smolagents[extra1,extra2]" ``` </hfoption> <hfoption id="uv"> ```bash uv pip install "smolagents[extra1,extra2]" ``` </hfoption> </hfoptions> ### Tools These extras include various tools and integrations: <hfoptions id="installation"> <hfoption id="pip"> - **toolkit**: Install a default set of tools for common tasks. ```bash pip install "smolagents[toolkit]" ``` - **mcp**: Add support for the Model Context Protocol (MCP) to integrate with external tools and services. 
```bash pip install "smolagents[mcp]" ``` </hfoption> <hfoption id="uv"> - **toolkit**: Install a default set of tools for common tasks. ```bash uv pip install "smolagents[toolkit]" ``` - **mcp**: Add support for the Model Context Protocol (MCP) to integrate with external tools and services. ```bash uv pip install "smolagents[mcp]" ``` </hfoption> </hfoptions> ### Model Integration These extras enable integration with various AI models and frameworks: <hfoptions id="installation"> <hfoption id="pip"> - **openai**: Add support for OpenAI API models. ```bash pip install "smolagents[openai]" ``` - **transformers**: Enable Hugging Face Transformers models. ```bash pip install "smolagents[transformers]" ``` - **vllm**: Add VLLM support for efficient model inference. ```bash pip install "smolagents[vllm]" ``` - **mlx-lm**: Enable support for MLX-LM models. ```bash pip install "smolagents[mlx-lm]" ``` - **litellm**: Add LiteLLM support for lightweight model inference. ```bash pip install "smolagents[litellm]" ``` - **bedrock**: Enable support for AWS Bedrock models. ```bash pip install "smolagents[bedrock]" ``` </hfoption> <hfoption id="uv"> - **openai**: Add support for OpenAI API models. ```bash uv pip install "smolagents[openai]" ``` - **transformers**: Enable Hugging Face Transformers models. ```bash uv pip install "smolagents[transformers]" ``` - **vllm**: Add VLLM support for efficient model inference. ```bash uv pip install "smolagents[vllm]" ``` - **mlx-lm**: Enable support for MLX-LM models. ```bash uv pip install "smolagents[mlx-lm]" ``` - **litellm**: Add LiteLLM support for lightweight model inference. ```bash uv pip install "smolagents[litellm]" ``` - **bedrock**: Enable support for AWS Bedrock models. 
```bash uv pip install "smolagents[bedrock]" ``` </hfoption> </hfoptions> ### Multimodal Capabilities Extras for handling different types of media and input: <hfoptions id="installation"> <hfoption id="pip"> - **vision**: Add support for image processing and computer vision tasks. ```bash pip install "smolagents[vision]" ``` - **audio**: Enable audio processing capabilities. ```bash pip install "smolagents[audio]" ``` </hfoption> <hfoption id="uv"> - **vision**: Add support for image processing and computer vision tasks. ```bash uv pip install "smolagents[vision]" ``` - **audio**: Enable audio processing capabilities. ```bash uv pip install "smolagents[audio]" ``` </hfoption> </hfoptions> ### Remote Execution Extras for executing code remotely: <hfoptions id="installation"> <hfoption id="pip"> - **docker**: Add support for executing code in Docker containers. ```bash pip install "smolagents[docker]" ``` - **e2b**: Enable E2B support for remote execution. ```bash pip install "smolagents[e2b]" ``` </hfoption> <hfoption id="uv"> - **docker**: Add support for executing code in Docker containers. ```bash uv pip install "smolagents[docker]" ``` - **e2b**: Enable E2B support for remote execution. ```bash uv pip install "smolagents[e2b]" ``` </hfoption> </hfoptions> ### Telemetry and User Interface Extras for telemetry, monitoring and user interface components: <hfoptions id="installation"> <hfoption id="pip"> - **telemetry**: Add support for monitoring and tracing. ```bash pip install "smolagents[telemetry]" ``` - **gradio**: Add support for interactive Gradio UI components. ```bash pip install "smolagents[gradio]" ``` </hfoption> <hfoption id="uv"> - **telemetry**: Add support for monitoring and tracing. ```bash uv pip install "smolagents[telemetry]" ``` - **gradio**: Add support for interactive Gradio UI components. 
```bash uv pip install "smolagents[gradio]" ``` </hfoption> </hfoptions> ### Complete Installation To install all available extras, you can use: <hfoptions id="installation"> <hfoption id="pip"> ```bash pip install "smolagents[all]" ``` </hfoption> <hfoption id="uv"> ```bash uv pip install "smolagents[all]" ``` </hfoption> </hfoptions> ## Verifying Installation After installation, you can verify that `smolagents` is installed correctly by running: ```python import smolagents print(smolagents.__version__) ``` ## Next Steps Once you have successfully installed `smolagents`, you can: - Follow the [guided tour](./guided_tour) to learn the basics. - Explore the [how-to guides](./examples/text_to_sql) for practical examples. - Read the [conceptual guides](./conceptual_guides/intro_agents) for high-level explanations. - Check out the [tutorials](./tutorials/building_good_agents) for in-depth tutorials on building agents. - Explore the [API reference](./reference/index) for detailed information on classes and functions.
smolagents/docs/source/en/installation.md/0
{ "file_path": "smolagents/docs/source/en/installation.md", "repo_id": "smolagents", "token_count": 2204 }
270
# Text-to-SQL [[open-in-colab]] इस ट्यूटोरियल में, हम देखेंगे कि कैसे `smolagents` का उपयोग करके एक एजेंट को SQL का उपयोग करने के लिए लागू किया जा सकता है। > आइए सबसे महत्वपूर्ण प्रश्न से शुरू करें: इसे साधारण क्यों नहीं रखें और एक सामान्य text-to-SQL पाइपलाइन का उपयोग करें? एक सामान्य text-to-SQL पाइपलाइन कमजोर होती है, क्योंकि उत्पन्न SQL क्वेरी गलत हो सकती है। इससे भी बुरी बात यह है कि क्वेरी गलत हो सकती है, लेकिन कोई एरर नहीं दिखाएगी, बल्कि बिना किसी अलार्म के गलत/बेकार आउटपुट दे सकती है। 👉 इसके बजाय, एक एजेंट सिस्टम आउटपुट का गंभीरता से निरीक्षण कर सकता है और तय कर सकता है कि क्वेरी को बदलने की जरूरत है या नहीं, इस प्रकार इसे बेहतर प्रदर्शन में मदद मिलती है। आइए इस एजेंट को बनाएं! 💪 पहले, हम SQL एनवायरनमेंट सेटअप करते हैं: ```py from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, Float, insert, inspect, text, ) engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() # create city SQL table table_name = "receipts" receipts = Table( table_name, metadata_obj, Column("receipt_id", Integer, primary_key=True), Column("customer_name", String(16), primary_key=True), Column("price", Float), Column("tip", Float), ) metadata_obj.create_all(engine) rows = [ {"receipt_id": 1, "customer_name": "Alan Payne", "price": 12.06, "tip": 1.20}, {"receipt_id": 2, "customer_name": "Alex Mason", "price": 23.86, "tip": 0.24}, {"receipt_id": 3, "customer_name": "Woodrow Wilson", "price": 53.43, "tip": 5.43}, {"receipt_id": 4, "customer_name": "Margaret James", "price": 21.11, "tip": 1.00}, ] for row in rows: stmt = insert(receipts).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) ``` ### Agent बनाएं अब आइए हमारी SQL टेबल को एक टूल द्वारा पुनर्प्राप्त करने योग्य बनाएं। टूल का विवरण विशेषता एजेंट सिस्टम द्वारा LLM के prompt में एम्बेड किया जाएगा: यह LLM को टूल का उपयोग करने के बारे में जानकारी देता है। यहीं पर हम SQL टेबल का वर्णन करना चाहते हैं। ```py inspector = inspect(engine) columns_info = [(col["name"], 
col["type"]) for col in inspector.get_columns("receipts")] table_description = "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info]) print(table_description) ``` ```text Columns: - receipt_id: INTEGER - customer_name: VARCHAR(16) - price: FLOAT - tip: FLOAT ``` अब आइए हमारा टूल बनाएं। इसे निम्नलिखित की आवश्यकता है: (अधिक जानकारी के लिए [टूल doc](../tutorials/tools) पढ़ें) - एक डॉकस्ट्रिंग जिसमें आर्ग्युमेंट्स की सूची वाला `Args:` भाग हो। - इनपुट और आउटपुट दोनों पर टाइप हिंट्स। ```py from smolagents import tool @tool def sql_engine(query: str) -> str: """ Allows you to perform SQL queries on the table. Returns a string representation of the result. The table is named 'receipts'. Its description is as follows: Columns: - receipt_id: INTEGER - customer_name: VARCHAR(16) - price: FLOAT - tip: FLOAT Args: query: The query to perform. This should be correct SQL. """ output = "" with engine.connect() as con: rows = con.execute(text(query)) for row in rows: output += "\n" + str(row) return output ``` अब आइए एक एजेंट बनाएं जो इस टूल का लाभ उठाता है। हम `CodeAgent` का उपयोग करते हैं, जो smolagents का मुख्य एजेंट क्लास है: एक एजेंट जो कोड में एक्शन लिखता है और ReAct फ्रेमवर्क के अनुसार पिछले आउटपुट पर पुनरावृत्ति कर सकता है। मॉडल वह LLM है जो एजेंट सिस्टम को संचालित करता है। `InferenceClientModel` आपको HF के Inference API का उपयोग करके LLM को कॉल करने की अनुमति देता है, या तो सर्वरलेस या डेडिकेटेड एंडपॉइंट के माध्यम से, लेकिन आप किसी भी प्रोप्राइटरी API का भी उपयोग कर सकते हैं। ```py from smolagents import CodeAgent, InferenceClientModel agent = CodeAgent( tools=[sql_engine], model=InferenceClientModel(model_id="meta-llama/Meta-Llama-3.1-8B-Instruct"), ) agent.run("Can you give me the name of the client who got the most expensive receipt?") ``` ### लेवल 2: टेबल जॉइन्स अब आइए इसे और चुनौतीपूर्ण बनाएं! 
हम चाहते हैं कि हमारा एजेंट कई टेबल्स के बीच जॉइन को संभाल सके। तो आइए हम प्रत्येक receipt_id के लिए वेटर्स के नाम रिकॉर्ड करने वाली एक दूसरी टेबल बनाते हैं! ```py table_name = "waiters" receipts = Table( table_name, metadata_obj, Column("receipt_id", Integer, primary_key=True), Column("waiter_name", String(16), primary_key=True), ) metadata_obj.create_all(engine) rows = [ {"receipt_id": 1, "waiter_name": "Corey Johnson"}, {"receipt_id": 2, "waiter_name": "Michael Watts"}, {"receipt_id": 3, "waiter_name": "Michael Watts"}, {"receipt_id": 4, "waiter_name": "Margaret James"}, ] for row in rows: stmt = insert(receipts).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) ``` चूंकि हमने टेबल को बदल दिया है, हम LLM को इस टेबल की जानकारी का उचित उपयोग करने देने के लिए इस टेबल के विवरण के साथ `SQLExecutorTool` को अपडेट करते हैं। ```py updated_description = """Allows you to perform SQL queries on the table. Beware that this tool's output is a string representation of the execution output. It can use the following tables:""" inspector = inspect(engine) for table in ["receipts", "waiters"]: columns_info = [(col["name"], col["type"]) for col in inspector.get_columns(table)] table_description = f"Table '{table}':\n" table_description += "Columns:\n" + "\n".join([f" - {name}: {col_type}" for name, col_type in columns_info]) updated_description += "\n\n" + table_description print(updated_description) ``` चूंकि यह रिक्वेस्ट पिछले वाले से थोड़ी कठिन है, हम LLM इंजन को अधिक शक्तिशाली [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) का उपयोग करने के लिए स्विच करेंगे! ```py sql_engine.description = updated_description agent = CodeAgent( tools=[sql_engine], model=InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct"), ) agent.run("Which waiter got more total money from tips?") ``` यह सीधे काम करता है! सेटअप आश्चर्यजनक रूप से सरल था, है ना? यह उदाहरण पूरा हो गया! 
हमने इन अवधारणाओं को छुआ है: - नए टूल्स का निर्माण। - टूल के विवरण को अपडेट करना। - एक मजबूत LLM में स्विच करने से एजेंट की तर्कशक्ति में मदद मिलती है। ✅ अब आप वह text-to-SQL सिस्टम बना सकते हैं जिसका आपने हमेशा सपना देखा है! ✨
smolagents/docs/source/hi/examples/text_to_sql.md/0
{ "file_path": "smolagents/docs/source/hi/examples/text_to_sql.md", "repo_id": "smolagents", "token_count": 5027 }
271
# Agent 简介 > [!TIP] > 译者注:Agent 的业内术语是“智能体”。本译文将保留 agent,不作翻译,以带来更高效的阅读体验。(在中文为主的文章中,It's easier to 注意到英文。Attention Is All You Need!) ## 🤔 什么是 agent? 任何使用 AI 的高效系统都需要为 LLM 提供某种访问现实世界的方式:例如调用搜索工具获取外部信息,或者操作某些程序以完成任务。换句话说,LLM 应该具有 **_Agent 能力_**。Agent 程序是 LLM 通往外部世界的门户。 > [!TIP] > AI agent 是 **LLM 输出控制工作流的程序**。 任何利用 LLM 的系统都会将 LLM 输出集成到代码中。LLM 输入对代码工作流的影响程度就是 LLM 在系统中的 agent 能力级别。 请注意,根据这个定义,"Agent" 不是一个离散的、非 0 即 1 的定义:相反,"Agent 能力" 是一个连续谱系,随着你在工作流中给予 LLM 更多或更少的权力而变化。 请参见下表中 agent 能力在不同系统中的变化: | Agent 能力级别 | 描述 | 名称 | 示例模式 | | ------------ | ---------------------------------------------- | ---------- | -------------------------------------------------- | | ☆☆☆ | LLM 输出对程序流程没有影响 | 简单处理器 | `process_llm_output(llm_response)` | | ★☆☆ | LLM 输出决定 if/else 分支 | 路由 | `if llm_decision(): path_a() else: path_b()` | | ★★☆ | LLM 输出决定函数执行 | 工具调用者 | `run_function(llm_chosen_tool, llm_chosen_args)` | | ★★★ | LLM 输出控制迭代和程序继续 | 多步 Agent | `while llm_should_continue(): execute_next_step()` | | ★★★ | 一个 agent 工作流可以启动另一个 agent 工作流 | 多 Agent | `if llm_trigger(): execute_agent()` | 多步 agent 具有以下代码结构: ```python memory = [user_defined_task] while llm_should_continue(memory): # 这个循环是多步部分 action = llm_get_next_action(memory) # 这是工具调用部分 observations = execute_action(action) memory += [action, observations] ``` 这个 agent 系统在一个循环中运行,每一步执行一个新动作(该动作可能涉及调用一些预定义的 *工具*,这些工具只是函数),直到其观察结果表明已达到解决给定任务的满意状态。以下是一个多步 agent 如何解决简单数学问题的示例: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif"/> </div> ## ✅ 何时使用 agent / ⛔ 何时避免使用 当你需要 LLM 确定应用程序的工作流时,agent 很有用。但它们通常有些过度。问题是:我真的需要工作流的灵活性来有效解决手头的任务吗? 如果预定义的工作流经常不足,这意味着你需要更多的灵活性。 让我们举个例子:假设你正在开发一个处理冲浪旅行网站客户请求的应用程序。 你可以提前知道请求将属于 2 个类别之一(基于用户选择),并且你为这 2 种情况都有预定义的工作流。 1. 想要了解旅行信息?⇒ 给他们访问搜索栏以搜索你的知识库 2. 想与销售交谈?⇒ 让他们填写联系表单。 如果这个确定性工作流适合所有查询,那就直接编码吧!这将为你提供一个 100% 可靠的系统,没有让不可预测的 LLM 干扰你的工作流而引入错误的风险。为了简单和稳健起见,建议规范化不使用任何 agent 行为。 但如果工作流不能提前确定得那么好呢? 
例如,用户想问:`"I can come on Monday, but I forgot my passport so risk being delayed to Wednesday, is it possible to take me and my stuff to surf on Tuesday morning, with a cancellation insurance?"` 这个问题涉及许多因素,可能上述预定的标准都不足以满足这个请求。 如果预定义的工作流经常不足,这意味着你需要更多的灵活性。 这就是 agent 设置发挥作用的地方。 在上面的例子中,你可以创建一个多步 agent,它可以访问天气 API 获取天气预报,Google Maps API 计算旅行距离,员工在线仪表板和你的知识库上的 RAG 系统。 直到最近,计算机程序还局限于预定义的工作流,试图通过堆积 if/else 分支来处理复杂性。它们专注于极其狭窄的任务,如"计算这些数字的总和"或"找到这个图中的最短路径"。但实际上,大多数现实生活中的任务,如我们上面的旅行示例,都不适合预定义的工作流。agent 系统为程序打开了现实世界任务的大门! ## 为什么选择 `smolagents`? 对于一些低级的 agent 用例,如链或路由器,你可以自己编写所有代码。这样会更好,因为它可以让你更好地控制和理解你的系统。 但一旦你开始追求更复杂的行为,比如让 LLM 调用函数(即"工具调用")或让 LLM 运行 while 循环("多步 agent"),一些抽象就变得必要: - 对于工具调用,你需要解析 agent 的输出,因此这个输出需要一个预定义的格式,如"Thought: I should call tool 'get_weather'. Action: get_weather(Paris).",你用预定义的函数解析它,并且给 LLM 的系统提示应该通知它这个格式。 - 对于 LLM 输出决定循环的多步 agent,你需要根据上次循环迭代中发生的情况给 LLM 不同的提示:所以你需要某种记忆能力。 看到了吗?通过这两个例子,我们已经发现需要一些项目来帮助我们: - 当然,一个作为系统引擎的 LLM - agent 可以访问的工具列表 - 从 LLM 输出中提取工具调用的解析器 - 与解析器同步的系统提示 - 记忆能力 但是等等,既然我们给 LLM 在决策中留出了空间,它们肯定会犯错误:所以我们需要错误日志记录和重试机制。 所有这些元素都需要紧密耦合才能形成一个功能良好的系统。这就是为什么我们决定需要制作基本构建块来让所有这些东西协同工作。 ## 代码 agent 在多步 agent 中,每一步 LLM 都可以编写一个动作,形式为调用外部工具。编写这些动作的常见格式(由 Anthropic、OpenAI 等使用)通常是"将动作编写为工具名称和要使用的参数的 JSON,然后解析以知道要执行哪个工具以及使用哪些参数"的不同变体。 [多项](https://huggingface.co/papers/2402.01030) [研究](https://huggingface.co/papers/2411.01747) [论文](https://huggingface.co/papers/2401.00812) 表明,在代码中进行工具调用的 LLM 要好得多。 原因很简单,_我们专门设计了我们的代码语言,使其成为表达计算机执行动作的最佳方式_。如果 JSON 片段是更好的表达方式,JSON 将成为顶级编程语言,编程将变得非常困难。 下图取自 [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030),说明了用代码编写动作的一些优势: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/code_vs_json_actions.png"> 与 JSON 片段相比,用代码编写动作提供了更好的: - **可组合性:** 你能像定义 python 函数一样,将 JSON 动作嵌套在一起,或定义一组 JSON 动作以供重用吗? - **对象管理:** 你如何在 JSON 中存储像 `generate_image` 这样的动作的输出? 
- **通用性:** 代码被构建为简单地表达任何你可以让计算机做的事情。 - **LLM 训练数据中的表示:** 大量高质量的代码动作已经包含在 LLM 的训练数据中,这意味着它们已经为此进行了训练!
smolagents/docs/source/zh/conceptual_guides/intro_agents.md/0
{ "file_path": "smolagents/docs/source/zh/conceptual_guides/intro_agents.md", "repo_id": "smolagents", "token_count": 5097 }
272
# This is a config for E2B sandbox template. # You can use template ID (qywp2ctmu2q7jzprcf4j) to create a sandbox: # Python SDK # from e2b import Sandbox, AsyncSandbox # sandbox = Sandbox("qywp2ctmu2q7jzprcf4j") # Sync sandbox # sandbox = await AsyncSandbox.create("qywp2ctmu2q7jzprcf4j") # Async sandbox # JS SDK # import { Sandbox } from 'e2b' # const sandbox = await Sandbox.create('qywp2ctmu2q7jzprcf4j') team_id = "f8776d3a-df2f-4a1d-af48-68c2e13b3b87" start_cmd = "/root/.jupyter/start-up.sh" dockerfile = "e2b.Dockerfile" template_id = "qywp2ctmu2q7jzprcf4j"
smolagents/e2b.toml/0
{ "file_path": "smolagents/e2b.toml", "repo_id": "smolagents", "token_count": 254 }
273
import re import string import warnings def normalize_number_str(number_str: str) -> float: # we replace these common units and commas to allow # conversion to float for char in ["$", "%", ","]: number_str = number_str.replace(char, "") try: return float(number_str) except ValueError: print(f"String {number_str} cannot be normalized to number str.") return float("inf") def split_string( s: str, char_list: list[str] = [",", ";"], ) -> list[str]: pattern = f"[{''.join(char_list)}]" return re.split(pattern, s) def is_float(element: any) -> bool: try: float(element) return True except ValueError: return False def question_scorer( model_answer: str, ground_truth: str, ) -> bool: # if gt is a number if is_float(ground_truth): normalized_answer = normalize_number_str(str(model_answer)) return normalized_answer == float(ground_truth) # if gt is a list elif any(char in ground_truth for char in [",", ";"]): # question with the fish: normalization removes punct gt_elems = split_string(ground_truth) ma_elems = split_string(model_answer) # check length is the same if len(gt_elems) != len(ma_elems): warnings.warn("Answer lists have different lengths, returning False.", UserWarning) return False # compare each element as float or str comparisons = [] for ma_elem, gt_elem in zip(ma_elems, gt_elems): if is_float(gt_elem): normalized_ma_elem = normalize_number_str(ma_elem) comparisons.append(normalized_ma_elem == float(gt_elem)) else: # we do not remove punct since comparisons can include punct comparisons.append( normalize_str(ma_elem, remove_punct=False) == normalize_str(gt_elem, remove_punct=False) ) return all(comparisons) # if gt is a str else: return normalize_str(model_answer) == normalize_str(ground_truth) def check_prediction_contains_answer_letters_in_order(prediction, true_answer): prediction = prediction.lower() true_answer = true_answer.lower() if len(prediction) > len(true_answer) * 3: return False i = 0 for letter in true_answer: if letter in prediction[i:]: i += 
prediction[i:].index(letter) else: return False return True def check_close_call(prediction, true_answer, is_correct): if is_correct: return True else: if is_float(true_answer): return is_correct else: if ( check_prediction_contains_answer_letters_in_order(str(prediction), str(true_answer)) and len(str(true_answer)) * 0.5 <= len(str(prediction)) <= len(str(true_answer)) * 2 ): print(f"Close call: {prediction} vs {true_answer}") return True else: return False def normalize_str(input_str, remove_punct=True) -> str: """ Normalize a string by: - Removing all white spaces - Optionally removing punctuation (if remove_punct is True) - Converting to lowercase Parameters: - input_str: str, the string to normalize - remove_punct: bool, whether to remove punctuation (default: True) Returns: - str, the normalized string """ # Remove all white spaces. Required e.g for seagull vs. sea gull no_spaces = re.sub(r"\s", "", input_str) # Remove punctuation, if specified. if remove_punct: translator = str.maketrans("", "", string.punctuation) return no_spaces.lower().translate(translator) else: return no_spaces.lower()
smolagents/examples/open_deep_research/scripts/gaia_scorer.py/0
{ "file_path": "smolagents/examples/open_deep_research/scripts/gaia_scorer.py", "repo_id": "smolagents", "token_count": 1643 }
274
<jupyter_start><jupyter_code>!pip install -e .. datasets sympy numpy matplotlib seaborn -q # Install dev version of smolagents + some packages # Benchmark date # - set a concrete date: DATE = "2024-12-26" # - or use default: today # DATE = None # Evaluation dataset # - the dataset is gated, so you must first visit its page to request access: https://huggingface.co/datasets/smolagents-benchmark/benchmark-v1 EVAL_DATASET = "smolagents/benchmark-v1" # Answers dataset: it must be a gated dataset; required to score the answers ANSWERS_DATASET = "smolagents/answers" # Whether to push the answers dataset to the Hub PUSH_ANSWERS_DATASET_TO_HUB = True # Results dataset RESULTS_DATASET = "smolagents/results" # Whether to push the results dataset to the Hub PUSH_RESULTS_DATASET_TO_HUB = True<jupyter_output><empty_output><jupyter_text>Constants and utilities/tools<jupyter_code>import re import string import warnings from concurrent.futures import ThreadPoolExecutor, as_completed from datetime import datetime import numpy as np from tqdm import tqdm def normalize_number_str(number_str: str) -> float: # we replace these common units and commas to allow # conversion to float for char in ["$", "%", ","]: number_str = number_str.replace(char, "") try: return float(number_str) except ValueError: return float("inf") def split_string( s: str, char_list: list[str] = [",", ";"], ) -> list[str]: pattern = f"[{''.join(char_list)}]" return re.split(pattern, s) def is_float(element: any) -> bool: try: float(element) return True except ValueError: return False def normalize_str(input_str, remove_punct=True) -> str: """ Normalize a string by: - Removing all white spaces - Optionally removing punctuation (if remove_punct is True) - Converting to lowercase Parameters: - input_str: str, the string to normalize - remove_punct: bool, whether to remove punctuation (default: True) Returns: - str, the normalized string """ # Remove all white spaces. Required e.g for seagull vs. 
sea gull no_spaces = re.sub(r"\s", "", input_str) # Remove punctuation, if specified. if remove_punct: translator = str.maketrans("", "", string.punctuation) return no_spaces.lower().translate(translator) else: return no_spaces.lower() def extract_numbers(text: str) -> list[str]: """This pattern matches: - Optional negative sign - Numbers with optional comma thousand separators - Optional decimal points with decimal numbers """ pattern = r"-?(?:\d{1,3}(?:,\d{3})+|\d+)(?:\.\d+)?" return [el.replace(",", "") for el in re.findall(pattern, text)] def get_question_score_gaia( model_answer: str, ground_truth: str, ) -> bool: """Scoring function used to score functions from the GAIA benchmark""" if is_float(ground_truth): normalized_answer = normalize_number_str(str(model_answer)) return normalized_answer == float(ground_truth) elif any(char in ground_truth for char in [",", ";"]): # if gt is a list # question with the fish: normalization removes punct gt_elems = split_string(ground_truth) ma_elems = split_string(model_answer) if len(gt_elems) != len(ma_elems): # check length is the same warnings.warn("Answer lists have different lengths, returning False.", UserWarning) return False comparisons = [] for ma_elem, gt_elem in zip(ma_elems, gt_elems): # compare each element as float or str if is_float(gt_elem): normalized_ma_elem = normalize_number_str(ma_elem) comparisons.append(normalized_ma_elem == float(gt_elem)) else: # we do not remove punct since comparisons can include punct comparisons.append( normalize_str(ma_elem, remove_punct=False) == normalize_str(gt_elem, remove_punct=False) ) return all(comparisons) else: # if gt is a str return normalize_str(model_answer) == normalize_str(ground_truth) def get_correct(row): if row["source"] == "MATH": # Checks the last number in answer numbers_answer = extract_numbers(str(row["answer"])) if len(numbers_answer) == 0: return False return np.isclose(float(numbers_answer[-1]), float(row["true_answer"]), rtol=1e-5, atol=1e-7) 
else: return get_question_score_gaia(str(row["answer"]), str(row["true_answer"])) def score_answers_subset(answers_dataset, answers_subset): try: print(answers_dataset, answers_subset) *model_id, action_type, task = answers_subset.split("__") model_id = "/".join(model_id) ds = datasets.load_dataset(answers_dataset, answers_subset, split="test") df = ds.to_pandas() df["correct"] = df.apply(get_correct, axis=1) assert df["correct"].notnull().sum() > 30, "Missing answers" acc = df["correct"].mean().item() result = df.loc[0, ["model_id", "agent_action_type", "source"]].to_dict() result["acc"] = acc return result except Exception as e: print(f"Error with {answers_subset}: {e}") return None def score_answers( answers_subsets, answers_dataset=ANSWERS_DATASET, date=DATE, push_to_hub_dataset=RESULTS_DATASET if PUSH_RESULTS_DATASET_TO_HUB else None, set_default=True, ): """ Score answers from the given dataset subsets. Parameters: answers_subsets: List of dataset subsets to score answers_dataset: Dataset containing the answers date: Date to use for the config name push_to_hub_dataset: Dataset ID to push results to, or None to skip pushing set_default: If True, sets this config as the default config in the Hugging Face Hub dataset. This means when users load the dataset without specifying a config, this version will be loaded by default. 
""" if not answers_dataset: raise ValueError("Pass 'answers_dataset' to load the answers from it") date = date or datetime.date.today().isoformat() results = [] with ThreadPoolExecutor(max_workers=16) as exe: futures = [ exe.submit(score_answers_subset, answers_dataset, answers_subset) for answers_subset in answers_subsets ] for f in tqdm(as_completed(futures), total=len(answers_subsets), desc="Processing tasks"): result = f.result() if result: results.append(result) df = pd.DataFrame(results) if push_to_hub_dataset: ds = datasets.Dataset.from_pandas(df) config = date ds.push_to_hub(push_to_hub_dataset, config_name=config, commit_message=f"Upload {config} results") return df<jupyter_output><empty_output><jupyter_text>Score answers<jupyter_code>import datasets import pandas as pd # Choose the answers subsets to score: # answers_subsets = ["meta-llama__Llama-3.1-8B-Instruct__code__gaia"] # or get all the answers subsets present in the ANSWERS_DATASET answers_subsets = datasets.get_dataset_config_names(ANSWERS_DATASET) print("Number of answers_subsets", len(answers_subsets)) print("Example of answers_subset", answers_subsets[0]) result_df = score_answers(answers_subsets) result_df["acc"] = (result_df["acc"] * 100).round(2) result_df.head() pivot_df = result_df.pivot_table( index=["model_id", "source"], columns=["agent_action_type"], values="acc", fill_value=float("nan"), ).reset_index()<jupyter_output><empty_output><jupyter_text>Display results<jupyter_code>display(pivot_df) import matplotlib.pyplot as plt from matplotlib.legend_handler import HandlerTuple # Added import # Assuming pivot_df is your original dataframe models = pivot_df["model_id"].unique() sources = pivot_df["source"].unique() # Create figure and axis plt.style.use("seaborn-v0_8-white") fig, ax = plt.subplots(figsize=(15, 6)) # Set the width of each bar group and positions of the bars width = 0.15 # width of each bar spacing = 0.02 # space between bars within a group group_spacing = 0.2 # space between 
model groups # Calculate positions for the bars num_sources = len(sources) total_width_per_group = (width + spacing) * num_sources * 2 # *2 for agent and vanilla x = np.arange(len(models)) * (total_width_per_group + group_spacing) # Plot bars for each source for i, source in enumerate(sources): source_data = pivot_df[pivot_df["source"] == source] agent_scores = [ source_data[source_data["model_id"] == model]["code"].values[0] if len(source_data[source_data["model_id"] == model]) > 0 else np.nan for model in models ] vanilla_scores = [ source_data[source_data["model_id"] == model]["vanilla"].values[0] if len(source_data[source_data["model_id"] == model]) > 0 else np.nan for model in models ] # Position calculation for each pair of bars pos = x + i * (width * 2 + spacing) agent_bars = ax.bar(pos, agent_scores, width, label=f"{source} (Agent)", alpha=0.8) vanilla_bars = ax.bar( pos + width * 0.6, vanilla_scores, width, hatch="////", alpha=0.5, hatch_linewidth=2, label=f"{source} (Vanilla)", color="white", edgecolor=agent_bars[0].get_facecolor(), ) # Customize the plot ax.set_ylabel("Score") ax.set_title("Model Performance Comparison") # Set x-axis ticks in the middle of each group group_centers = x + (total_width_per_group - spacing) / 2 ax.set_xticks(group_centers) # Wrap long model names to prevent overlap wrapped_labels = ["\n".join(model.split("/")) for model in models] ax.set_xticklabels(wrapped_labels, rotation=0, ha="center") # Modify legend to combine agent and vanilla entries handles, labels = ax.get_legend_handles_labels() unique_sources = sources legend_elements = [ (handles[i * 2], handles[i * 2 + 1], labels[i * 2].replace(" (Agent)", "")) for i in range(len(unique_sources)) ] custom_legend = ax.legend( [(agent_handle, vanilla_handle) for agent_handle, vanilla_handle, _ in legend_elements], [label for _, _, label in legend_elements], handler_map={tuple: HandlerTuple(ndivide=None)}, bbox_to_anchor=(1.05, 1), loc="upper left", ) ax.yaxis.grid(True, 
linestyle="--", alpha=0.3) ax.set_ylim(bottom=0) plt.tight_layout() ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) plt.show()<jupyter_output><empty_output>
smolagents/examples/smolagents_benchmark/score.ipynb/0
{ "file_path": "smolagents/examples/smolagents_benchmark/score.ipynb", "repo_id": "smolagents", "token_count": 4251 }
275
system_prompt: |- You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can. To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code. To solve the task, you must plan forward to proceed in a series of steps, in a cycle of Thought, Code, and Observation sequences. At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use. Then in the Code sequence you should write the code in simple Python. The code sequence must be opened with '{{code_block_opening_tag}}', and closed with '{{code_block_closing_tag}}'. During each intermediate step, you can use 'print()' to save whatever important information you will then need. These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step. In the end you have to return a final answer using the `final_answer` tool. Here are a few examples using notional tools: --- Task: "Generate an image of the oldest person in this document." Thought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. {{code_block_opening_tag}} answer = document_qa(document=document, question="Who is the oldest person mentioned?") print(answer) {{code_block_closing_tag}} Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland." Thought: I will now generate an image showcasing the oldest person. {{code_block_opening_tag}} image = image_generator("A portrait of John Doe, a 55-year-old man living in Canada.") final_answer(image) {{code_block_closing_tag}} --- Task: "What is the result of the following operation: 5 + 3 + 1294.678?" 
Thought: I will use python code to compute the result of the operation and then return the final answer using the `final_answer` tool {{code_block_opening_tag}} result = 5 + 3 + 1294.678 final_answer(result) {{code_block_closing_tag}} --- Task: "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French. You have been provided with these additional arguments, that you can access using the keys as variables in your python code: {'question': 'Quel est l'animal sur l'image?', 'image': 'path/to/image.jpg'}" Thought: I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image. {{code_block_opening_tag}} translated_question = translator(question=question, src_lang="French", tgt_lang="English") print(f"The translated question is {translated_question}.") answer = image_qa(image=image, question=translated_question) final_answer(f"The answer is {answer}") {{code_block_closing_tag}} --- Task: In a 1979 interview, Stanislaus Ulam discusses with Martin Sherwin about other great physicists of his time, including Oppenheimer. What does he say was the consequence of Einstein learning too much math on his creativity, in one word? Thought: I need to find and read the 1979 interview of Stanislaus Ulam with Martin Sherwin. {{code_block_opening_tag}} pages = web_search(query="1979 interview Stanislaus Ulam Martin Sherwin physicists Einstein") print(pages) {{code_block_closing_tag}} Observation: No result found for query "1979 interview Stanislaus Ulam Martin Sherwin physicists Einstein". Thought: The query was maybe too restrictive and did not find any results. Let's try again with a broader query. 
{{code_block_opening_tag}} pages = web_search(query="1979 interview Stanislaus Ulam") print(pages) {{code_block_closing_tag}} Observation: Found 6 pages: [Stanislaus Ulam 1979 interview](https://ahf.nuclearmuseum.org/voices/oral-histories/stanislaus-ulams-interview-1979/) [Ulam discusses Manhattan Project](https://ahf.nuclearmuseum.org/manhattan-project/ulam-manhattan-project/) (truncated) Thought: I will read the first 2 pages to know more. {{code_block_opening_tag}} for url in ["https://ahf.nuclearmuseum.org/voices/oral-histories/stanislaus-ulams-interview-1979/", "https://ahf.nuclearmuseum.org/manhattan-project/ulam-manhattan-project/"]: whole_page = visit_webpage(url) print(whole_page) print("\n" + "="*80 + "\n") # Print separator between pages {{code_block_closing_tag}} Observation: Manhattan Project Locations: Los Alamos, NM Stanislaus Ulam was a Polish-American mathematician. He worked on the Manhattan Project at Los Alamos and later helped design the hydrogen bomb. In this interview, he discusses his work at (truncated) Thought: I now have the final answer: from the webpages visited, Stanislaus Ulam says of Einstein: "He learned too much mathematics and sort of diminished, it seems to me personally, it seems to me his purely physics creativity." Let's answer in one word. {{code_block_opening_tag}} final_answer("diminished") {{code_block_closing_tag}} --- Task: "Which city has the highest population: Guangzhou or Shanghai?" Thought: I need to get the populations for both cities and compare them: I will use the tool `web_search` to get the population of both cities. {{code_block_opening_tag}} for city in ["Guangzhou", "Shanghai"]: print(f"Population {city}:", web_search(f"{city} population")) {{code_block_closing_tag}} Observation: Population Guangzhou: ['Guangzhou has a population of 15 million inhabitants as of 2021.'] Population Shanghai: '26 million (2019)' Thought: Now I know that Shanghai has the highest population.
{{code_block_opening_tag}} final_answer("Shanghai") {{code_block_closing_tag}} --- Task: "What is the current age of the pope, raised to the power 0.36?" Thought: I will use the tool `wikipedia_search` to get the age of the pope, and confirm that with a web search. {{code_block_opening_tag}} pope_age_wiki = wikipedia_search(query="current pope age") print("Pope age as per wikipedia:", pope_age_wiki) pope_age_search = web_search(query="current pope age") print("Pope age as per google search:", pope_age_search) {{code_block_closing_tag}} Observation: Pope age: "The pope Francis is currently 88 years old." Thought: I know that the pope is 88 years old. Let's compute the result using python code. {{code_block_opening_tag}} pope_current_age = 88 ** 0.36 final_answer(pope_current_age) {{code_block_closing_tag}} Above example were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you only have access to these tools, behaving like regular python functions: {{code_block_opening_tag}} {%- for tool in tools.values() %} {{ tool.to_code_prompt() }} {% endfor %} {{code_block_closing_tag}} {%- if managed_agents and managed_agents.values() | list %} You can also give tasks to team members. Calling a team member works similarly to calling a tool: provide the task description as the 'task' argument. Since this team member is a real human, be as detailed and verbose as necessary in your task description. You can also include any relevant variables or context using the 'additional_args' argument. Here is a list of the team members that you can call: {{code_block_opening_tag}} {%- for agent in managed_agents.values() %} def {{ agent.name }}(task: str, additional_args: dict[str, Any]) -> str: """{{ agent.description }} Args: task: Long detailed description of the task. additional_args: Dictionary of extra inputs to pass to the managed agent, e.g. images, dataframes, or any other contextual data it may need. 
""" {% endfor %} {{code_block_closing_tag}} {%- endif %} Here are the rules you should always follow to solve your task: 1. Always provide a 'Thought:' sequence, and a '{{code_block_opening_tag}}' sequence ending with '{{code_block_closing_tag}}', else you will fail. 2. Use only variables that you have defined! 3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wikipedia_search({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wikipedia_search(query="What is the place where James Bond lives?")'. 4. For tools WITHOUT JSON output schema: Take care to not chain too many sequential tool calls in the same code block, as their output format is unpredictable. For instance, a call to wikipedia_search without a JSON output schema has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block. 5. For tools WITH JSON output schema: You can confidently chain multiple tool calls and directly access structured output fields in the same code block! When a tool has a JSON output schema, you know exactly what fields and data types to expect, allowing you to write robust code that directly accesses the structured response (e.g., result['field_name']) without needing intermediate print() statements. 6. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters. 7. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'. 8. Never create any notional variables in our code, as having these in your logs will derail you from the true variables. 9. You can use imports in your code, but only from the following list of modules: {{authorized_imports}} 10. 
The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist. 11. Don't give up! You're in charge of solving the task, not providing directions to solve it. {%- if custom_instructions %} {{custom_instructions}} {%- endif %} Now Begin! planning: initial_plan : |- You are a world expert at analyzing a situation to derive facts, and plan accordingly towards solving a task. Below I will present you a task. You will need to 1. build a survey of facts known or needed to solve the task, then 2. make a plan of action to solve the task. ## 1. Facts survey You will build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need. These "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings: ### 1.1. Facts given in the task List here the specific facts given in the task that could help you (there might be nothing here). ### 1.2. Facts to look up List here any facts that we may need to look up. Also list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here. ### 1.3. Facts to derive List here anything that we want to derive from the above by logical reasoning, for instance computation or simulation. Don't make any assumptions. For each item, provide a thorough reasoning. Do not add anything else on top of three headings above. ## 2. Plan Then for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS. After writing the final step of the plan, write the '<end_plan>' tag and stop there. 
You can leverage these tools, behaving like regular python functions: ```python {%- for tool in tools.values() %} {{ tool.to_code_prompt() }} {% endfor %} ``` {%- if managed_agents and managed_agents.values() | list %} You can also give tasks to team members. Calling a team member works similarly to calling a tool: provide the task description as the 'task' argument. Since this team member is a real human, be as detailed and verbose as necessary in your task description. You can also include any relevant variables or context using the 'additional_args' argument. Here is a list of the team members that you can call: ```python {%- for agent in managed_agents.values() %} def {{ agent.name }}(task: str, additional_args: dict[str, Any]) -> str: """{{ agent.description }} Args: task: Long detailed description of the task. additional_args: Dictionary of extra inputs to pass to the managed agent, e.g. images, dataframes, or any other contextual data it may need. """ {% endfor %} ``` {%- endif %} --- Now begin! Here is your task: ``` {{task}} ``` First in part 1, write the facts survey, then in part 2, write your plan. update_plan_pre_messages: |- You are a world expert at analyzing a situation, and plan accordingly towards solving a task. You have been given the following task: ``` {{task}} ``` Below you will find a history of attempts made to solve this task. You will first have to produce a survey of known and unknown facts, then propose a step-by-step high-level plan to solve the task. If the previous tries so far have met some success, your updated plan can build on these results. If you are stalled, you can make a completely new plan starting from scratch. Find the task and history below: update_plan_post_messages: |- Now write your updated facts below, taking into account the above history: ## 1. Updated facts survey ### 1.1. Facts given in the task ### 1.2. Facts that we have learned ### 1.3. Facts still to look up ### 1.4. 
Facts still to derive Then write a step-by-step high-level plan to solve the task above. ## 2. Plan ### 2. 1. ... Etc. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Beware that you have {remaining_steps} steps remaining. Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS. After writing the final step of the plan, write the '<end_plan>' tag and stop there. You can leverage these tools, behaving like regular python functions: ```python {%- for tool in tools.values() %} {{ tool.to_code_prompt() }} {% endfor %} ``` {%- if managed_agents and managed_agents.values() | list %} You can also give tasks to team members. Calling a team member works similarly to calling a tool: provide the task description as the 'task' argument. Since this team member is a real human, be as detailed and verbose as necessary in your task description. You can also include any relevant variables or context using the 'additional_args' argument. Here is a list of the team members that you can call: ```python {%- for agent in managed_agents.values() %} def {{ agent.name }}(task: str, additional_args: dict[str, Any]) -> str: """{{ agent.description }} Args: task: Long detailed description of the task. additional_args: Dictionary of extra inputs to pass to the managed agent, e.g. images, dataframes, or any other contextual data it may need. """ {% endfor %} ``` {%- endif %} Now write your updated facts survey below, then your new plan. managed_agent: task: |- You're a helpful agent named '{{name}}'. You have been submitted this task by your manager. --- Task: {{task}} --- You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible to give them a clear understanding of the answer. Your final_answer WILL HAVE to contain these parts: ### 1. Task outcome (short version): ### 2. 
Task outcome (extremely detailed version): ### 3. Additional context (if relevant): Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost. And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback. report: |- Here is the final answer from your managed agent '{{name}}': {{final_answer}} final_answer: pre_messages: |- An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory: post_messages: |- Based on the above, please provide an answer to the following user task: {{task}}
smolagents/src/smolagents/prompts/code_agent.yaml/0
{ "file_path": "smolagents/src/smolagents/prompts/code_agent.yaml", "repo_id": "smolagents", "token_count": 4865 }
276
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from smolagents.agent_types import _AGENT_TYPE_MAPPING from smolagents.default_tools import ( DuckDuckGoSearchTool, PythonInterpreterTool, SpeechToTextTool, VisitWebpageTool, WikipediaSearchTool, ) from .test_tools import ToolTesterMixin from .utils.markers import require_run_all class DefaultToolTests(unittest.TestCase): def test_visit_webpage(self): arguments = {"url": "https://en.wikipedia.org/wiki/United_States_Secretary_of_Homeland_Security"} result = VisitWebpageTool()(arguments) assert isinstance(result, str) assert "United States Secretary of Homeland Security - Wikipedia\n\n[Jump to content]" in result @require_run_all def test_ddgs_with_kwargs(self): result = DuckDuckGoSearchTool(timeout=20)("DeepSeek parent company") assert isinstance(result, str) class TestPythonInterpreterTool(ToolTesterMixin): def setup_method(self): self.tool = PythonInterpreterTool(authorized_imports=["numpy"]) self.tool.setup() def test_exact_match_arg(self): result = self.tool("(2 / 2) * 4") assert result == "Stdout:\n\nOutput: 4.0" def test_exact_match_kwarg(self): result = self.tool(code="(2 / 2) * 4") assert result == "Stdout:\n\nOutput: 4.0" def test_agent_type_output(self): inputs = ["2 * 2"] output = self.tool(*inputs, sanitize_inputs_outputs=True) output_type = _AGENT_TYPE_MAPPING[self.tool.output_type] assert isinstance(output, output_type) def 
test_agent_types_inputs(self): inputs = ["2 * 2"] _inputs = [] for _input, expected_input in zip(inputs, self.tool.inputs.values()): input_type = expected_input["type"] if isinstance(input_type, list): _inputs.append([_AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(_AGENT_TYPE_MAPPING[input_type](_input)) # Should not raise an error output = self.tool(*inputs, sanitize_inputs_outputs=True) output_type = _AGENT_TYPE_MAPPING[self.tool.output_type] assert isinstance(output, output_type) def test_imports_work(self): result = self.tool("import numpy as np") assert "import from numpy is not allowed" not in result.lower() def test_unauthorized_imports_fail(self): with pytest.raises(Exception) as e: self.tool("import sympy as sp") assert "sympy" in str(e).lower() class TestSpeechToTextTool: def test_new_instance(self): from transformers.models.whisper import WhisperForConditionalGeneration, WhisperProcessor tool = SpeechToTextTool() assert tool is not None assert tool.pre_processor_class == WhisperProcessor assert tool.model_class == WhisperForConditionalGeneration def test_initialization(self): from transformers.models.whisper import WhisperForConditionalGeneration, WhisperProcessor tool = SpeechToTextTool(model="dummy_model_id") assert tool is not None assert tool.pre_processor_class == WhisperProcessor assert tool.model_class == WhisperForConditionalGeneration @pytest.mark.parametrize( "language, content_type, extract_format, query", [ ("en", "summary", "HTML", "Python_(programming_language)"), # English, Summary Mode, HTML format ("en", "text", "WIKI", "Python_(programming_language)"), # English, Full Text Mode, WIKI format ("es", "summary", "HTML", "Python_(lenguaje_de_programación)"), # Spanish, Summary Mode, HTML format ("es", "text", "WIKI", "Python_(lenguaje_de_programación)"), # Spanish, Full Text Mode, WIKI format ], ) def test_wikipedia_search(language, content_type, extract_format, query): tool = 
WikipediaSearchTool( user_agent="TestAgent (test@example.com)", language=language, content_type=content_type, extract_format=extract_format, ) result = tool.forward(query) assert isinstance(result, str), "Output should be a string" assert "✅ **Wikipedia Page:**" in result, "Response should contain Wikipedia page title" assert "🔗 **Read more:**" in result, "Response should contain Wikipedia page URL" if content_type == "summary": assert len(result.split()) < 1000, "Summary mode should return a shorter text" if content_type == "text": assert len(result.split()) > 1000, "Full text mode should return a longer text"
smolagents/tests/test_default_tools.py/0
{ "file_path": "smolagents/tests/test_default_tools.py", "repo_id": "smolagents", "token_count": 1943 }
277
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import textwrap import unittest import pytest from IPython.core.interactiveshell import InteractiveShell from smolagents import Tool from smolagents.tools import tool from smolagents.utils import ( create_agent_gradio_app_template, get_source, instance_to_source, is_valid_name, parse_code_blobs, parse_json_blob, ) class ValidTool(Tool): name = "valid_tool" description = "A valid tool" inputs = {"input": {"type": "string", "description": "input"}} output_type = "string" simple_attr = "string" dict_attr = {"key": "value"} def __init__(self, optional_param="default"): super().__init__() self.param = optional_param def forward(self, input: str) -> str: return input.upper() @tool def valid_tool_function(input: str) -> str: """A valid tool function. Args: input (str): Input string. 
""" return input.upper() VALID_TOOL_SOURCE = """\ from smolagents.tools import Tool class ValidTool(Tool): name = "valid_tool" description = "A valid tool" inputs = {'input': {'type': 'string', 'description': 'input'}} output_type = "string" simple_attr = "string" dict_attr = {'key': 'value'} def __init__(self, optional_param="default"): super().__init__() self.param = optional_param def forward(self, input: str) -> str: return input.upper() """ VALID_TOOL_FUNCTION_SOURCE = '''\ from smolagents.tools import Tool class SimpleTool(Tool): name = "valid_tool_function" description = "A valid tool function." inputs = {'input': {'type': 'string', 'description': 'Input string.'}} output_type = "string" def __init__(self): self.is_initialized = True def forward(self, input: str) -> str: """A valid tool function. Args: input (str): Input string. """ return input.upper() ''' class AgentTextTests(unittest.TestCase): def test_parse_code_blobs(self): with pytest.raises(ValueError): parse_code_blobs("Wrong blob!", ("<code>", "</code>")) # Parsing mardkwon with code blobs should work output = parse_code_blobs( """ Here is how to solve the problem: <code> import numpy as np </code> """, ("<code>", "</code>"), ) assert output == "import numpy as np" # Parsing pure python code blobs should work code_blob = "import numpy as np" output = parse_code_blobs(code_blob, ("```python", "```")) assert output == code_blob # Allow whitespaces after header output = parse_code_blobs("<code> \ncode_a\n</code>", ("<code>", "</code>")) assert output == "code_a" # Parsing markdown with code blobs should work output = parse_code_blobs( """ Here is how to solve the problem: ```python import numpy as np ``` """, ("<code>", "</code>"), ) assert output == "import numpy as np" def test_multiple_code_blobs(self): test_input = "<code>\nFoo\n</code>\n\n<code>\ncode_a\n</code>\n\n<code>\ncode_b\n</code>" result = parse_code_blobs(test_input, ("<code>", "</code>")) assert result == "Foo\n\ncode_a\n\ncode_b" 
@pytest.fixture(scope="function") def ipython_shell(): """Reset IPython shell before and after each test.""" shell = InteractiveShell.instance() shell.reset() # Clean before test yield shell shell.reset() # Clean after test @pytest.mark.parametrize( "obj_name, code_blob", [ ("test_func", "def test_func():\n return 42"), ("TestClass", "class TestClass:\n ..."), ], ) def test_get_source_ipython(ipython_shell, obj_name, code_blob): ipython_shell.run_cell(code_blob, store_history=True) obj = ipython_shell.user_ns[obj_name] assert get_source(obj) == code_blob def test_get_source_standard_class(): class TestClass: ... source = get_source(TestClass) assert source == "class TestClass: ..." assert source == textwrap.dedent(inspect.getsource(TestClass)).strip() def test_get_source_standard_function(): def test_func(): ... source = get_source(test_func) assert source == "def test_func(): ..." assert source == textwrap.dedent(inspect.getsource(test_func)).strip() def test_get_source_ipython_errors_empty_cells(ipython_shell): test_code = textwrap.dedent("""class TestClass:\n ...""").strip() ipython_shell.user_ns["In"] = [""] ipython_shell.run_cell(test_code, store_history=True) with pytest.raises(ValueError, match="No code cells found in IPython session"): get_source(ipython_shell.user_ns["TestClass"]) def test_get_source_ipython_errors_definition_not_found(ipython_shell): test_code = textwrap.dedent("""class TestClass:\n ...""").strip() ipython_shell.user_ns["In"] = ["", "print('No class definition here')"] ipython_shell.run_cell(test_code, store_history=True) with pytest.raises(ValueError, match="Could not find source code for TestClass in IPython history"): get_source(ipython_shell.user_ns["TestClass"]) def test_get_source_ipython_errors_type_error(): with pytest.raises(TypeError, match="Expected class or callable"): get_source(None) @pytest.mark.parametrize( "tool, expected_tool_source", [(ValidTool(), VALID_TOOL_SOURCE), (valid_tool_function, VALID_TOOL_FUNCTION_SOURCE)] ) 
def test_instance_to_source(tool, expected_tool_source): tool_source = instance_to_source(tool, base_cls=Tool) assert tool_source == expected_tool_source def test_e2e_class_tool_save(tmp_path): class TestTool(Tool): name = "test_tool" description = "Test tool description" inputs = { "task": { "type": "string", "description": "tool input", } } output_type = "string" def forward(self, task: str): import IPython # noqa: F401 return task test_tool = TestTool() test_tool.save(tmp_path, make_gradio_app=True) assert set(os.listdir(tmp_path)) == {"requirements.txt", "app.py", "tool.py"} assert (tmp_path / "tool.py").read_text() == textwrap.dedent( """\ from typing import Any, Optional from smolagents.tools import Tool import IPython class TestTool(Tool): name = "test_tool" description = "Test tool description" inputs = {'task': {'type': 'string', 'description': 'tool input'}} output_type = "string" def forward(self, task: str): import IPython # noqa: F401 return task def __init__(self, *args, **kwargs): self.is_initialized = False """ ) requirements = set((tmp_path / "requirements.txt").read_text().split()) assert requirements == {"IPython", "smolagents"} assert (tmp_path / "app.py").read_text() == textwrap.dedent( """\ from smolagents import launch_gradio_demo from tool import TestTool tool = TestTool() launch_gradio_demo(tool) """ ) def test_e2e_ipython_class_tool_save(tmp_path): shell = InteractiveShell.instance() code_blob = textwrap.dedent( f"""\ from smolagents.tools import Tool class TestTool(Tool): name = "test_tool" description = "Test tool description" inputs = {{"task": {{"type": "string", "description": "tool input", }} }} output_type = "string" def forward(self, task: str): import IPython # noqa: F401 return task TestTool().save("{tmp_path}", make_gradio_app=True) """ ) assert shell.run_cell(code_blob, store_history=True).success assert set(os.listdir(tmp_path)) == {"requirements.txt", "app.py", "tool.py"} assert (tmp_path / "tool.py").read_text() == 
textwrap.dedent( """\ from typing import Any, Optional from smolagents.tools import Tool import IPython class TestTool(Tool): name = "test_tool" description = "Test tool description" inputs = {'task': {'type': 'string', 'description': 'tool input'}} output_type = "string" def forward(self, task: str): import IPython # noqa: F401 return task def __init__(self, *args, **kwargs): self.is_initialized = False """ ) requirements = set((tmp_path / "requirements.txt").read_text().split()) assert requirements == {"IPython", "smolagents"} assert (tmp_path / "app.py").read_text() == textwrap.dedent( """\ from smolagents import launch_gradio_demo from tool import TestTool tool = TestTool() launch_gradio_demo(tool) """ ) def test_e2e_function_tool_save(tmp_path): @tool def test_tool(task: str) -> str: """ Test tool description Args: task: tool input """ import IPython # noqa: F401 return task test_tool.save(tmp_path, make_gradio_app=True) assert set(os.listdir(tmp_path)) == {"requirements.txt", "app.py", "tool.py"} assert (tmp_path / "tool.py").read_text() == textwrap.dedent( """\ from smolagents import Tool from typing import Any, Optional class SimpleTool(Tool): name = "test_tool" description = "Test tool description" inputs = {'task': {'type': 'string', 'description': 'tool input'}} output_type = "string" def forward(self, task: str) -> str: \""" Test tool description Args: task: tool input \""" import IPython # noqa: F401 return task""" ) requirements = set((tmp_path / "requirements.txt").read_text().split()) assert requirements == {"smolagents"} # FIXME: IPython should be in the requirements assert (tmp_path / "app.py").read_text() == textwrap.dedent( """\ from smolagents import launch_gradio_demo from tool import SimpleTool tool = SimpleTool() launch_gradio_demo(tool) """ ) def test_e2e_ipython_function_tool_save(tmp_path): shell = InteractiveShell.instance() code_blob = textwrap.dedent( f""" from smolagents import tool @tool def test_tool(task: str) -> str: \""" Test 
tool description Args: task: tool input \""" import IPython # noqa: F401 return task test_tool.save("{tmp_path}", make_gradio_app=True) """ ) assert shell.run_cell(code_blob, store_history=True).success assert set(os.listdir(tmp_path)) == {"requirements.txt", "app.py", "tool.py"} assert (tmp_path / "tool.py").read_text() == textwrap.dedent( """\ from smolagents import Tool from typing import Any, Optional class SimpleTool(Tool): name = "test_tool" description = "Test tool description" inputs = {'task': {'type': 'string', 'description': 'tool input'}} output_type = "string" def forward(self, task: str) -> str: \""" Test tool description Args: task: tool input \""" import IPython # noqa: F401 return task""" ) requirements = set((tmp_path / "requirements.txt").read_text().split()) assert requirements == {"smolagents"} # FIXME: IPython should be in the requirements assert (tmp_path / "app.py").read_text() == textwrap.dedent( """\ from smolagents import launch_gradio_demo from tool import SimpleTool tool = SimpleTool() launch_gradio_demo(tool) """ ) @pytest.mark.parametrize( "raw_json, expected_data, expected_blob", [ ( """{}""", {}, "", ), ( """Text{}""", {}, "Text", ), ( """{"simple": "json"}""", {"simple": "json"}, "", ), ( """With text here{"simple": "json"}""", {"simple": "json"}, "With text here", ), ( """{"simple": "json"}With text after""", {"simple": "json"}, "", ), ( """With text before{"simple": "json"}And text after""", {"simple": "json"}, "With text before", ), ], ) def test_parse_json_blob_with_valid_json(raw_json, expected_data, expected_blob): data, blob = parse_json_blob(raw_json) assert data == expected_data assert blob == expected_blob @pytest.mark.parametrize( "raw_json", [ """simple": "json"}""", """With text here"simple": "json"}""", """{"simple": ""json"}With text after""", """{"simple": "json"With text after""", "}}", ], ) def test_parse_json_blob_with_invalid_json(raw_json): with pytest.raises(Exception): parse_json_blob(raw_json) 
@pytest.mark.parametrize( "name,expected", [ # Valid identifiers ("valid_name", True), ("ValidName", True), ("valid123", True), ("_private", True), # Invalid identifiers ("", False), ("123invalid", False), ("invalid-name", False), ("invalid name", False), ("invalid.name", False), # Python keywords ("if", False), ("for", False), ("class", False), ("return", False), # Non-string inputs (123, False), (None, False), ([], False), ({}, False), ], ) def test_is_valid_name(name, expected): """Test the is_valid_name function with various inputs.""" assert is_valid_name(name) is expected def test_agent_gradio_app_template_excludes_class_keyword(): """Test that the AGENT_GRADIO_APP_TEMPLATE excludes 'class' from agent kwargs.""" # Mock agent_dict with 'class' key that should be excluded agent_dict = { "model": {"class": "CodeAgent", "data": {}}, "class": "CodeAgent", # This should be excluded to prevent SyntaxError "some_valid_attr": "value", "tools": [], "managed_agents": {}, "requirements": [], "prompt_templates": {}, } template = create_agent_gradio_app_template() result = template.render( agent_name="test_agent", class_name="CodeAgent", agent_dict=agent_dict, tools={}, managed_agents={}, managed_agent_relative_path="", ) # Should contain valid attribute but not 'class=' as a keyword argument assert "some_valid_attr='value'," in result assert "class=" not in result # Verify the generated code is syntactically valid Python import ast try: ast.parse(result) except SyntaxError as e: pytest.fail(f"Generated app.py contains syntax error: {e}")
smolagents/tests/test_utils.py/0
{ "file_path": "smolagents/tests/test_utils.py", "repo_id": "smolagents", "token_count": 7047 }
278
FROM nvidia/cuda:12.8.0-cudnn-devel-ubuntu24.04 AS deps ARG llamacpp_version=b4827 ARG llamacpp_cuda=OFF ARG llamacpp_native=ON ARG llamacpp_cpu_arm_arch=native ARG cuda_arch=75-real;80-real;86-real;89-real;90-real WORKDIR /opt/src ENV DEBIAN_FRONTEND=noninteractive RUN apt update && apt upgrade -y && apt install -y \ clang \ cmake \ curl \ git \ python3-dev \ libssl-dev \ pkg-config \ tar ADD https://github.com/ggml-org/llama.cpp/archive/refs/tags/${llamacpp_version}.tar.gz /opt/src/ RUN mkdir -p llama.cpp \ && tar -xzf ${llamacpp_version}.tar.gz -C llama.cpp --strip-components=1 \ && cd llama.cpp \ && cmake -B build \ -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_LIBDIR=/usr/lib \ -DCMAKE_C_COMPILER=clang \ -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_CUDA_ARCHITECTURES=${cuda_arch} \ -DGGML_CUDA=${llamacpp_cuda} \ -DGGML_NATIVE=${llamacpp_native} \ -DGGML_CPU_ARM_ARCH=${llamacpp_cpu_arm_arch} \ -DLLAMA_BUILD_COMMON=OFF \ -DLLAMA_BUILD_TESTS=OFF \ -DLLAMA_BUILD_EXAMPLES=OFF \ -DLLAMA_BUILD_SERVER=OFF \ && cmake --build build --parallel --config Release \ && cmake --install build WORKDIR /app COPY rust-toolchain.toml rust-toolchain.toml RUN curl -sSf https://sh.rustup.rs | sh -s -- --no-modify-path --default-toolchain 1.85.1 --profile minimal -y ENV PATH="/root/.cargo/bin:$PATH" RUN cargo install cargo-chef --locked FROM deps AS planner COPY . . RUN cargo chef prepare --recipe-path recipe.json FROM deps AS builder COPY --from=planner /app/recipe.json recipe.json RUN cargo chef cook \ --recipe-path recipe.json \ --profile release \ --package text-generation-router-llamacpp COPY . . 
RUN cargo build \ --profile release \ --package text-generation-router-llamacpp --frozen FROM nvidia/cuda:12.8.0-cudnn-runtime-ubuntu24.04 WORKDIR /app ENV DEBIAN_FRONTEND=noninteractive RUN apt update && apt upgrade -y && apt install -y \ python3-venv \ python3-pip RUN python3 -m venv /venv ENV PATH="/venv/bin:$PATH" COPY backends/llamacpp/requirements.txt requirements.txt COPY --from=builder /opt/src/llama.cpp/gguf-py gguf-py COPY --from=builder /opt/src/llama.cpp/convert_hf_to_gguf.py /bin/ RUN pip3 install --no-cache-dir \ -r requirements.txt \ -e gguf-py COPY --from=builder /usr/lib/libllama.so /usr/lib/ COPY --from=builder /usr/lib/libggml*.so /usr/lib/ COPY --from=builder /app/target/release/text-generation-router-llamacpp /usr/bin/ ENV HF_HUB_ENABLE_HF_TRANSFER=1 ENTRYPOINT ["text-generation-router-llamacpp"]
text-generation-inference/Dockerfile_llamacpp/0
{ "file_path": "text-generation-inference/Dockerfile_llamacpp", "repo_id": "text-generation-inference", "token_count": 1128 }
279
#[allow(clippy::derive_partial_eq_without_eq)] mod pb; mod client; mod sharded_client; pub use client::Client; pub use pb::generate::v3::{ input_chunk::Chunk, Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType, HealthResponse, Image, InfoResponse, Input, InputChunk, NextTokenChooserParameters, Request, StoppingCriteriaParameters, Tokens, }; pub use sharded_client::ShardedClient;
text-generation-inference/backends/client/src/v3/mod.rs/0
{ "file_path": "text-generation-inference/backends/client/src/v3/mod.rs", "repo_id": "text-generation-inference", "token_count": 142 }
280
from typing import Tuple from dataclasses import dataclass, field import torch from text_generation_server.models.globals import BLOCK_SIZE from text_generation_server.utils.weights import Weights @dataclass class KVScales: """ Key-value scales for FP8 KV cache. This data class stores key and value scales both as a GPU tensor and as a GPU float. This inconvenience is necessary because some functions (e.g. scaling kernels) take scales as a GPU tensor, whereas others (e.g. flashinfer) take scales as a CPU scalar. """ key_scale: torch.Tensor value_scale: torch.Tensor key_scale_cpu: float = field(init=False) value_scale_cpu: float = field(init=False) def __post_init__(self): if self.key_scale.numel() != 1 or self.value_scale.numel() != 1: raise ValueError("Key and value scales must be scalar tensors.") self.key_scale_cpu = self.key_scale.item() self.value_scale_cpu = self.value_scale.item() class KVCache: """ Key-value cache for attention layers. """ kv_cache: Tuple[torch.Tensor, torch.Tensor] def __init__( self, *, num_blocks: int, num_heads: int, head_size: int, dtype: torch.dtype, device: torch.device, ): """Construct the key-value cache for a layer.""" ## TODO FP8 kv cache support if dtype is torch.float8_e5m2: raise ValueError("torch.float8_e5m2 is not supported in hpu. 
") self.kv_cache = ( torch.zeros( (num_blocks * BLOCK_SIZE, num_heads, head_size), dtype=dtype, device=device, ), torch.zeros( (num_blocks * BLOCK_SIZE, num_heads, head_size), dtype=dtype, device=device, ), ) @property def dtype(self): """Get the data type of the cache.""" return self.kv_cache[0].dtype @property def key(self): """Get the key cache.""" return self.kv_cache[0] @property def value(self): """Get the value cache.""" return self.kv_cache[1] def store( self, *, key: torch.Tensor, value: torch.Tensor, slots: torch.Tensor, kv_scales: KVScales, ): """Store the key and value at the given slots.""" ## TODO FP8 kv cache support key_cache = self.kv_cache[0] value_cache = self.kv_cache[1] paged_reshape_and_cache( key, value, key_cache, value_cache, slots, kv_scales.key_scale, kv_scales.value_scale, ) class KVCompressCache(KVCache): """ Key-value cache for attention layers. """ kv_cache: torch.Tensor def __init__( self, *, num_blocks: int, head_size: int, dtype: torch.dtype, device: torch.device, ): """Construct the key-value cache for a layer.""" ## TODO FP8 kv cache support if dtype is torch.float8_e5m2: raise ValueError("torch.float8_e5m2 is not supported in hpu. 
") self.kv_cache = torch.zeros( (num_blocks * BLOCK_SIZE, 1, head_size), dtype=dtype, device=device, ) @property def dtype(self): """Get the data type of the cache.""" return self.kv_cache.dtype @property def key(self): """Get the key cache.""" return self.kv_cache @property def value(self): """Get the value cache.""" return self.kv_cache def store( self, *, key: torch.Tensor, value: torch.Tensor, slots: torch.Tensor, kv_scales: KVScales, ): """Store the key and value at the given slots.""" ## TODO FP8 kv cache support if self.kv_cache.dtype == torch.float8_e4m3fn: key = torch.ops.hpu.cast_to_fp8_v2( key, kv_scales.key_scale, False, False, torch.float8_e4m3fn )[0] self.kv_cache.index_copy_(0, slots, key) def paged_reshape_and_cache( key: torch.Tensor, value: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, slots: torch.Tensor, k_scale: torch.Tensor, v_scale: torch.Tensor, ): if key_cache.dtype == torch.float8_e4m3fn: key = torch.ops.hpu.cast_to_fp8_v2( key, k_scale, False, False, torch.float8_e4m3fn )[0] value = torch.ops.hpu.cast_to_fp8_v2( value, v_scale, False, False, torch.float8_e4m3fn )[0] key_cache.index_copy_(0, slots, key) value_cache.index_copy_(0, slots, value) def get_kv_scales(weights: Weights, prefix: str) -> KVScales: """Load KV cache scales.""" key_scale = torch.tensor(1.0, dtype=torch.float32, device=weights.device) value_scale = key_scale if weights.has_tensor(f"{prefix}.k_scale") and weights.has_tensor( f"{prefix}.v_scale" ): key_scale = weights.get_tensor(f"{prefix}.k_scale", to_dtype=False).float() value_scale = weights.get_tensor(f"{prefix}.v_scale", to_dtype=False).float() elif weights.has_tensor(f"{prefix}.kv_scale"): # Fall back to older more coarse-grained scale when available. key_scale = weights.get_tensor(f"{prefix}.kv_scale").float() value_scale = key_scale return KVScales(key_scale=key_scale, value_scale=value_scale)
text-generation-inference/backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/attention/kv_cache.py", "repo_id": "text-generation-inference", "token_count": 2679 }
281
import torch from torch.nn import functional as F class FastLinear(torch.nn.Module): def __init__( self, weight, bias, ) -> None: super().__init__() self.weight = torch.nn.Parameter(weight, requires_grad=False) if bias is not None: self.bias = torch.nn.Parameter(bias, requires_grad=False) else: self.bias = None @classmethod def load(cls, config, prefix: str, weights, bias: bool): weight = weights.get_tensor(f"{prefix}.weight") if bias: bias = weights.get_tensor(f"{prefix}.bias") else: bias = None return cls(weight, bias) def forward(self, input: torch.Tensor) -> torch.Tensor: return F.linear(input, self.weight, self.bias) def get_linear(weight, bias): # Weights that are loaded through methods that are not # quantization-aware are still bare tensors. We may want # to change this in the future. if isinstance(weight, torch.Tensor): return FastLinear(weight, bias) return weight.get_linear(bias)
text-generation-inference/backends/gaudi/server/text_generation_server/layers/linear.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/linear.py", "repo_id": "text-generation-inference", "token_count": 469 }
282
# coding=utf-8 # Copyright 2022 HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple, Any from text_generation_server.layers.attention.kv_cache import get_kv_scales from text_generation_server.layers.attention import ( paged_attention, attention, set_block_mapping, Seqlen, HPUPagedAttentionMetadata, ) from text_generation_server.layers import ( FastLinear, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, get_linear, ) from text_generation_server.layers.rotary import ( PositionRotaryEmbedding, ) from text_generation_server.layers.layernorm import ( FastLayerNorm, ) from vllm_hpu_extension.ops import DynamicFusedMOE import habana_frameworks.torch as htorch class DbrxAttentionConfig(PretrainedConfig): def __init__( self, attn_pdrop: float = 0, clip_qkv: Optional[float] = None, kv_n_heads: int = 1, rope_theta: float = 10000.0, **kwargs: Any, ): super().__init__(**kwargs) self.attn_pdrop = attn_pdrop self.clip_qkv = clip_qkv self.kv_n_heads = kv_n_heads self.rope_theta = rope_theta for k in ["model_type"]: if k in kwargs: kwargs.pop(k) if len(kwargs) != 0: raise ValueError(f"Found unknown {kwargs=}") class DbrxFFNConfig(PretrainedConfig): def __init__( self, ffn_act_fn: Optional[dict] = None, 
ffn_hidden_size: int = 3584, moe_num_experts: int = 4, moe_top_k: int = 1, moe_jitter_eps: Optional[float] = None, moe_loss_weight: float = 0.01, moe_normalize_expert_weights: Optional[float] = 1, uniform_expert_assignment: bool = False, **kwargs: Any, ): super().__init__() if ffn_act_fn is None: ffn_act_fn = {"name": "silu"} self.ffn_act_fn = ffn_act_fn self.ffn_hidden_size = ffn_hidden_size self.moe_num_experts = moe_num_experts self.moe_top_k = moe_top_k self.moe_jitter_eps = moe_jitter_eps self.moe_loss_weight = moe_loss_weight self.moe_normalize_expert_weights = moe_normalize_expert_weights self.uniform_expert_assignment = uniform_expert_assignment if uniform_expert_assignment: raise ValueError("`uniform_expert_assignment = True` is not supported") for k in ["model_type"]: if k in kwargs: kwargs.pop(k) if len(kwargs) != 0: raise ValueError(f"Found unknown {kwargs=}") class DbrxConfig(PretrainedConfig): attribute_map = { "hidden_size": "d_model", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", } def __init__( self, d_model: int = 2048, n_heads: int = 16, n_layers: int = 24, max_seq_len: int = 2048, vocab_size: int = 32000, resid_pdrop: float = 0.0, emb_pdrop: float = 0.0, attn_config: Optional[DbrxAttentionConfig] = None, ffn_config: Optional[DbrxFFNConfig] = None, use_cache: bool = True, initializer_range: float = 0.02, output_router_logits: bool = False, router_aux_loss_coef: float = 0.05, **kwargs: Any, ): if attn_config is None: self.attn_config = DbrxAttentionConfig() elif isinstance(attn_config, dict): self.attn_config = DbrxAttentionConfig(**attn_config) else: self.attn_config = attn_config if ffn_config is None: self.ffn_config = DbrxFFNConfig() elif isinstance(ffn_config, dict): self.ffn_config = DbrxFFNConfig(**ffn_config) else: self.ffn_config = ffn_config self.d_model = d_model self.n_heads = n_heads self.n_layers = n_layers self.max_seq_len = max_seq_len self.vocab_size = vocab_size self.resid_pdrop = resid_pdrop self.emb_pdrop = 
emb_pdrop self.use_cache = use_cache self.initializer_range = initializer_range self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef tie_word_embeddings = kwargs.pop("tie_word_embeddings", False) if tie_word_embeddings: raise ValueError("tie_word_embeddings is not supported for Dbrx models.") super().__init__( tie_word_embeddings=tie_word_embeddings, **kwargs, ) @property def num_key_value_heads(self): # We can't use the attribute map, since this the number of KV # heads is not top-level. return self.attn_config.kv_n_heads def promote_scalar(x: torch.Tensor) -> torch.Tensor: return x.view(1) if len(x.size()) == 0 else x def load_attention(config, prefix, weights): return TensorParallelColumnLinear.load_qkv( config, prefix=f"{prefix}.Wqkv", weights=weights, bias=False, num_heads=config.n_heads, num_key_value_heads=config.attn_config.kv_n_heads, ) def _load_experts(config, prefix, weights): world_size = weights.process_group.size() rank = weights.process_group.rank() assert ( config.ffn_config.ffn_hidden_size % world_size == 0 ), f"The chosen size {config.ffn_config.ffn_hidden_size} is not compatible with sharding on {world_size} shards" expert_size = config.ffn_config.ffn_hidden_size block_size = expert_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensor = torch.empty( (config.ffn_config.moe_num_experts * block_size, config.d_model), dtype=weights.dtype, device=weights.device, ) slice_ = weights._get_slice(f"{prefix}") for i in range(config.ffn_config.moe_num_experts): offset = i * expert_size expert_slice = slice_[start + offset : stop + offset] tensor[i * block_size : (i + 1) * block_size] = expert_slice.to( dtype=weights.dtype ).to(device=weights.device) return tensor def _load_experts_quantized(config, prefix, weights, cls): world_size = weights.process_group.size() rank = weights.process_group.rank() assert ( config.ffn_config.ffn_hidden_size % world_size == 0 ), f"The chosen size 
{config.ffn_config.ffn_hidden_size} is not compatible with sharding on {world_size} shards" expert_size = config.ffn_config.ffn_hidden_size block_size = expert_size // world_size start = rank * block_size stop = (rank + 1) * block_size slice_ = weights._get_slice(f"{prefix}") experts = [] for i in range(config.ffn_config.moe_num_experts): if config.quantize in ["gptq", "awq"]: raise NotImplementedError( "Dbrx does not support gptq/awq quantization yet." ) else: offset = i * expert_size expert_slice = ( slice_[start + offset : stop + offset] .to(dtype=weights.dtype) .to(device=weights.device) ) if cls == TensorParallelRowLinear: expert_slice = expert_slice.t().contiguous() linear = get_linear(expert_slice, None) experts.append(cls(linear, weights.process_group)) else: linear = get_linear(expert_slice, None) experts.append(cls(linear)) return experts class DbrxAttention(torch.nn.Module): def __init__( self, prefix: str, config, weights, rotary_emb, ): super().__init__() self.clip_qkv = config.attn_config.clip_qkv self.num_heads = config.n_heads self.hidden_size = config.d_model self.head_size = self.hidden_size // self.num_heads self.rotary_emb = rotary_emb self.softmax_scale = self.head_size**-0.5 if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.attn_config.kv_n_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) self.kv_scales = get_kv_scales(weights, f"{prefix}") self.o_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=False, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device 
).repeat_interleave(self.num_groups) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ): qkv = self.query_key_value(hidden_states) if self.clip_qkv is not None: qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv) query, kv = qkv.split( [ self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) kv_cache.store( key=kv[:, 0], value=kv[:, 1], slots=slots, kv_scales=self.kv_scales, ) # Prefill if cu_seqlen_prefill is not None: # sdpa attn_output = attention( query=query, key=kv[:, 0], value=kv[:, 1], kv_cache=kv_cache, kv_scales=self.kv_scales, seqlen=seqlen, softmax_scale=self.softmax_scale, ) # Decode else: attn_output = paged_attention( query, kv_cache, self.kv_head_mapping, self.softmax_scale, seqlen, kv_scales=self.kv_scales, hpu_attention_meta=hpu_attention_meta, ) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class DbrxNormAttentionNorm(nn.Module): def __init__( self, prefix: str, config, weights, rotary_emb, ): super().__init__() self.norm_1 = FastLayerNorm.load_no_bias( prefix=f"{prefix}.norm_1", weights=weights, eps=1e-5 ) self.self_attn = DbrxAttention( prefix=f"{prefix}.attn", config=config, weights=weights, rotary_emb=rotary_emb, ) self.norm_2 = FastLayerNorm.load_no_bias( prefix=f"{prefix}.norm_2", weights=weights, eps=1e-5, ) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ): normed_hidden_states, res = self.norm_1(hidden_states, residual) # Self Attention attn_output = self.self_attn( normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ) # faster post attention rms norm normed_attn_res_output, attn_res = self.norm_2(attn_output, res) 
return normed_attn_res_output, attn_res @torch.jit.script def select_experts( gate_logits: torch.Tensor, top_k: int, moe_normalize_expert_weights: int ): # all_probs: (sequence_length, n_experts) and upcast for softmax all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) # weights, selected_experts: (sequence_length, top-k) weights, selected_experts = torch.topk(all_probs, top_k, dim=-1) if moe_normalize_expert_weights: weights = weights / torch.norm( weights, p=moe_normalize_expert_weights, dim=-1, keepdim=True ) weights = weights.view(-1) selected_experts = selected_experts.view(-1) return selected_experts, weights @torch.jit.script def round_up(x: torch.Tensor, value: int): return torch.div(x + (value - 1), value, rounding_mode="trunc") * value class BlockSparseMoE(nn.Module): def __init__(self, prefix, config: DbrxConfig, weights): super().__init__() self.moe_normalize_expert_weights = ( config.ffn_config.moe_normalize_expert_weights ) self.hidden_dim = config.d_model self.ffn_dim = config.ffn_config.ffn_hidden_size // weights.process_group.size() self.num_experts = config.ffn_config.moe_num_experts self.top_k = config.ffn_config.moe_top_k act = config.ffn_config.ffn_act_fn["name"] if "gelu" in act: self.act = lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) elif "silu" in act: self.act = torch.nn.functional.silu else: self.act = ACT2FN[act] # gating self.gate = FastLinear.load( config, f"{prefix}.router.layer", weights, bias=False ) # merged expert weights, all of size (n_experts * ffn_dim, hidden_dim) w1 = _load_experts(config, f"{prefix}.experts.mlp.w1", weights).view( self.num_experts, self.ffn_dim, self.hidden_dim ) v1 = _load_experts(config, f"{prefix}.experts.mlp.v1", weights).view( self.num_experts, self.ffn_dim, self.hidden_dim ) self.wv1 = torch.cat([w1, v1], dim=1) self.w2 = ( _load_experts(config, f"{prefix}.experts.mlp.w2", weights) 
.view(self.num_experts, self.ffn_dim, self.hidden_dim) .transpose(1, 2) .contiguous() ) self.process_group = weights.process_group self.hpu_fused_moe = DynamicFusedMOE(self.num_experts) for i in range(self.num_experts): self.hpu_fused_moe.MoeOp.w13_list[i].set_weight(self.wv1[i]) self.hpu_fused_moe.MoeOp.w2_list[i].set_weight(self.w2[i]) def forward(self, x: torch.Tensor) -> torch.Tensor: # router_logits: (num_tokens, n_experts) router_logits = self.gate(x) out = self.hpu_fused_moe(x, router_logits, self.top_k) # Reduce sum if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out.view(*x.shape) class DenseMoE(nn.Module): def __init__(self, prefix, config: DbrxConfig, weights): super().__init__() self.moe_normalize_expert_weights = ( config.ffn_config.moe_normalize_expert_weights ) self.hidden_dim = config.d_model self.ffn_dim = config.ffn_config.ffn_hidden_size // weights.process_group.size() self.num_experts = config.ffn_config.moe_num_experts self.top_k = config.ffn_config.moe_top_k act = config.ffn_config.ffn_act_fn["name"] if "gelu" in act: self.act = lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) elif "silu" in act: self.act = torch.nn.functional.silu else: self.act = ACT2FN[act] # gating self.gate = FastLinear.load( config, f"{prefix}.router.layer", weights, bias=False ) self.w1 = _load_experts_quantized( config, prefix=f"{prefix}.experts.mlp.w1", weights=weights, cls=TensorParallelColumnLinear, ) self.w2 = _load_experts_quantized( config, prefix=f"{prefix}.experts.mlp.w2", weights=weights, cls=TensorParallelRowLinear, ) self.v1 = _load_experts_quantized( config, prefix=f"{prefix}.experts.mlp.v1", weights=weights, cls=TensorParallelColumnLinear, ) self.process_group = weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: """ x: (sequence_length, model_dim) gate_logits: (sequence_length, n_experts) """ # optional 
reshape input_shape = x.shape x = x.view(-1, input_shape[-1]) # gate_logits: (sequence_length, n_experts) gate_logits = self.gate(x) # all_probs: (sequence_length, n_experts) and upcast for softmax weights = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) if self.top_k < self.num_experts: _, not_selected_experts = torch.topk( weights, self.num_experts - self.top_k, largest=False, sorted=False, dim=1, ) # Mask not selected experts weights.scatter_(1, not_selected_experts, 0) # Re-normalize if self.moe_normalize_expert_weights: weights = weights / torch.norm( weights, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True ) weights = weights.to(x.dtype) # Final output tensor out = x.new_zeros(x.shape[0], self.hidden_dim) for i in range(self.num_experts): h = self.act(self.w1[i](x)) * self.v1[i](x) h = self.w2[i](h, reduce=False) # Add expert output to out with masking out += h * weights[:, i].view(-1, 1) # Reduce sum if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out class DbrxLayer(nn.Module): def __init__(self, prefix: str, layer_id, config, weights, rotary_emb): super().__init__() prefix = f"{prefix}.blocks.{layer_id}" self.attn = DbrxNormAttentionNorm( prefix=f"{prefix}.norm_attn_norm", config=config, weights=weights, rotary_emb=rotary_emb, ) moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE self.moe = moe_cls(f"{prefix}.ffn", config, weights) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ): # Self Attention attn_output, attn_res = self.attn( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ) moe_output = self.moe(attn_output) return moe_output, attn_res class DbrxModel(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.embed_tokens = TensorParallelEmbedding( prefix=f"{prefix}.wte", weights=weights ) rotary_emb = 
PositionRotaryEmbedding.static( config=config, dim=config.d_model // config.n_heads, base=config.attn_config.rope_theta, device=weights.device, ) self.layers = nn.ModuleList( [ DbrxLayer( prefix, layer_id, config, weights, rotary_emb, ) for layer_id in range(config.n_layers) ] ) self.norm = FastLayerNorm.load_no_bias( prefix=f"{prefix}.norm_f", weights=weights, eps=1e-5 ) self.head_size = self.layers[0].attn.self_attn.head_size self.num_heads = self.layers[0].attn.self_attn.num_heads self.num_key_value_heads = self.layers[0].attn.self_attn.num_key_value_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], slots: torch.Tensor, seqlen: Seqlen, hpu_attention_meta: Optional[HPUPagedAttentionMetadata], ) -> torch.Tensor: if hpu_attention_meta is not None: hpu_attention_meta = set_block_mapping( hpu_attention_meta, input_ids.shape[0] ) hidden_states = self.embed_tokens(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].attn.self_attn.rotary_emb.get_cos_sin(position_ids) residual = None lazy_mode = htorch.utils.internal.is_lazy() if lazy_mode: htorch.core.mark_step() for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], slots, seqlen, hpu_attention_meta, ) if lazy_mode: htorch.core.mark_step() hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class FlashDbrxForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() if not prefix: prefix = "transformer" else: prefix = f"{prefix}.transformer" self.model = DbrxModel(prefix, config, weights) self.lm_head = SpeculativeHead.load( config, prefix="lm_head", weights=weights, ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: 
List[Tuple[torch.Tensor, torch.Tensor]], slots: torch.Tensor, seqlen: Seqlen, hpu_attention_meta: Optional[HPUPagedAttentionMetadata], lm_head_indices: Optional[torch.Tensor] = None, adapter_data: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: hidden_states = self.model( input_ids, position_ids, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits, speculative_logits = self.lm_head(hidden_states) return logits, speculative_logits
text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_dbrx_modeling.py", "repo_id": "text-generation-inference", "token_count": 12281 }
283
import torch
import torch.distributed

from torch import nn
from transformers.activations import ACT2FN
from transformers.configuration_utils import PretrainedConfig
from typing import Optional, List, Tuple

from text_generation_server.layers.attention import (
    paged_attention,
    attention,
    set_block_mapping,
    Seqlen,
    HPUPagedAttentionMetadata,
)
from text_generation_server.layers import (
    TensorParallelRowLinear,
    TensorParallelColumnLinear,
    TensorParallelEmbedding,
    SpeculativeHead,
    get_linear,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers.layernorm import (
    FastLayerNorm,
)
from text_generation_server.layers.rotary import (
    PositionRotaryEmbedding,
)
import habana_frameworks.torch as htorch


class PhiConfig(PretrainedConfig):
    """Configuration for Phi models.

    Mirrors the Llama config with three notable differences, called out
    inline below: gelu activation, residual dropout and PARTIAL rotary
    embeddings (`partial_rotary_factor`).
    """

    def __init__(
        self,
        vocab_size=51200,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="gelu_fast",  # llama uses silu
        layer_norm_eps=1e-05,  # rms in llama,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        resid_pdrop=0.1,  # llama doesn't have this
        partial_rotary_factor=0.5,  # important difference between llama and phi
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.rope_theta = rope_theta
        self.resid_pdrop = resid_pdrop
        self.partial_rotary_factor = partial_rotary_factor

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


# this is the same as llama except for Phi uses bias=True
def load_attention(config, prefix, weights):
    """Load the fused q/k/v projection, sharded or GQA-aware as needed."""
    if config.num_attention_heads != config.num_key_value_heads:
        return _load_gqa(config, prefix, weights)
    else:
        return TensorParallelColumnLinear.load_multi(
            config,
            prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
            dim=0,
            weights=weights,
            bias=True,
        )


def _load_gqa(config, prefix: str, weights):
    """Load grouped-query-attention q/k/v weights as a single column-parallel
    linear, validating the sharded shape."""
    assert config.hidden_size % config.num_attention_heads == 0
    assert config.num_attention_heads % weights.process_group.size() == 0

    weight = weights.get_multi_weights_col(
        prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
        dim=0,
    )

    if config.quantize not in ["gptq", "awq"]:
        weight = weight.to(dtype=weights.dtype).to(device=weights.device)

        head_size = config.hidden_size // config.num_attention_heads
        num_heads = config.num_attention_heads // weights.process_group.size()
        num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
        # FIX(review): the error message previously interpolated the unsharded
        # `config.num_key_value_heads`, which disagreed with the value actually
        # checked; it now reports the sharded expectation.
        assert list(weight.shape) == [
            (num_heads + 2 * num_key_value_heads) * head_size,
            config.hidden_size,
        ], f"{list(weight.shape)} != {[(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size]}"

    # this is the same as llama except for Phi uses bias=True
    return TensorParallelColumnLinear(get_linear(weight, bias=True))


class FlashPhiAttention(torch.nn.Module):
    """Phi attention with PARTIAL rotary embeddings and biased projections."""

    def __init__(
        self,
        prefix: str,
        config,
        weights,
        rotary_emb,
    ):
        super().__init__()
        self.num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.num_heads
        self.softmax_scale = self.head_size**-0.5
        # Only the first `rotary_dim` channels of each head are rotated.
        self.rotary_dim = int(config.partial_rotary_factor * self.head_size)
        self.rotary_emb = rotary_emb

        if self.num_heads % weights.process_group.size() != 0:
            raise ValueError(
                f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
                f"and `num_shards`: {weights.process_group.size()}"
            )

        # Per-shard head counts after tensor-parallel partitioning.
        self.num_heads = self.num_heads // weights.process_group.size()
        self.num_key_value_heads = (
            config.num_key_value_heads // weights.process_group.size()
        )

        self.query_key_value = load_attention(config, prefix, weights)
        self.kv_scales = get_kv_scales(weights, prefix)

        # in llama the dense layer is called "o_proj" and has bias=False
        self.dense = TensorParallelRowLinear.load(
            config,
            prefix=f"{prefix}.dense",
            weights=weights,
            bias=True,
        )

        self.num_groups = self.num_heads // self.num_key_value_heads
        # Maps each query head to its KV head for paged attention.
        self.kv_head_mapping = torch.arange(
            0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
        ).repeat_interleave(self.num_groups)

    def forward(
        self,
        hidden_states,
        cos,
        sin,
        cu_seqlen_prefill,
        kv_cache,
        slots,
        seqlen,
        hpu_attention_meta,
    ):
        # Compute query, key, value and split
        qkv = self.query_key_value(hidden_states)
        query, kv = qkv.split(
            [
                self.head_size * self.num_heads,
                2 * self.head_size * self.num_key_value_heads,
            ],
            dim=1,
        )

        # Reshape query and key for rotary embeddings
        query = query.view(-1, self.num_heads, self.head_size)
        kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)

        # NOTE: this is the main difference between Llama and Phi
        # in llama the rotary embeddings are applied to the whole query and key.
        # Phi uses PARTIAL rotary embeddings, which are applied to the first
        # `rotary_dim` dimensions only.
        #
        # Apply partial positional embeddings in place
        self.rotary_emb(
            query[:, :, : self.rotary_dim], kv[:, 0, :, : self.rotary_dim], cos, sin
        )

        # Reshape key and value and cache
        kv_cache.store(
            key=kv[:, 0],
            value=kv[:, 1],
            slots=slots,
            kv_scales=self.kv_scales,
        )

        # Prefill
        if cu_seqlen_prefill is not None:
            attn_output = attention(
                query=query,
                key=kv[:, 0],
                value=kv[:, 1],
                kv_scales=self.kv_scales,
                kv_cache=kv_cache,
                seqlen=seqlen,
                softmax_scale=self.softmax_scale,
            )
        # Decode
        else:
            attn_output = paged_attention(
                query,
                kv_cache,
                self.kv_head_mapping,
                self.softmax_scale,
                seqlen,
                kv_scales=self.kv_scales,
                hpu_attention_meta=hpu_attention_meta,
            )

        return self.dense(attn_output.view(-1, self.num_heads * self.head_size))


class PhiMLP(nn.Module):
    """Phi feed-forward block: fc1 -> activation -> fc2, with biases."""

    def __init__(self, prefix, config, weights):
        super().__init__()
        act = config.hidden_act
        # gelu variants go through torch's fused gelu; everything else through
        # the transformers activation registry.
        self.act = (
            ACT2FN[act]
            if "gelu" not in act
            else lambda x: torch.nn.functional.gelu(
                x,
                approximate=(
                    "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
                ),
            )
        )

        # llama weights are up_proj and down_proj and bias=False
        self.up_proj = TensorParallelColumnLinear.load(
            config,
            prefix=f"{prefix}.fc1",
            weights=weights,
            bias=True,
        )
        self.down_proj = TensorParallelRowLinear.load(
            config,
            prefix=f"{prefix}.fc2",
            weights=weights,
            bias=True,
        )

    def forward(self, hidden_states):
        # NOTE: Llama requires the gate up states to an intermediate size
        # Phi does not and we can avoid the `view` operation
        return self.down_proj(self.act(self.up_proj(hidden_states)))


class FlashPhiLayer(nn.Module):
    """One Phi decoder layer: a single pre-layernorm feeding attention and MLP
    in PARALLEL; their (dropped-out) outputs are summed."""

    def __init__(self, prefix: str, layer_id, config, weights, rotary_emb):
        super().__init__()
        prefix = f"{prefix}.layers.{layer_id}"
        self.self_attn = FlashPhiAttention(
            prefix=f"{prefix}.self_attn",
            config=config,
            weights=weights,
            rotary_emb=rotary_emb,
        )
        self.mlp = PhiMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
        self.input_layernorm = FastLayerNorm.load(
            prefix=f"{prefix}.input_layernorm",
            weights=weights,
            eps=config.layer_norm_eps,
        )
        self.resid_dropout = torch.nn.Dropout(config.resid_pdrop)

    def forward(
        self,
        hidden_states,
        residual,
        cos,
        sin,
        cu_seqlen_prefill,
        kv_cache,
        slots,
        seqlen,
        hpu_attention_meta,
    ):
        hidden_states, res = self.input_layernorm(hidden_states, residual)
        # Self Attention
        attn_output = self.self_attn(
            hidden_states,
            cos,
            sin,
            cu_seqlen_prefill,
            kv_cache,
            slots,
            seqlen,
            hpu_attention_meta,
        )

        # Parallel residual: attention and MLP both read the normed input.
        hidden_states = self.resid_dropout(attn_output).add(
            self.resid_dropout(self.mlp(hidden_states))
        )

        return hidden_states, res


class FlashPhiModel(torch.nn.Module):
    """Phi decoder backbone: embeddings, decoder layers, final layer norm."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()
        process_group = weights.process_group
        self.tp_rank = process_group.rank()
        self.tp_world_size = process_group.size()
        self.embed_tokens = TensorParallelEmbedding(
            prefix=f"{prefix}.embed_tokens", weights=weights
        )
        # Rotary embedding only spans the partial rotary dimensions.
        rotary_emb = PositionRotaryEmbedding.static(
            config=config,
            dim=int(
                config.partial_rotary_factor
                * (config.hidden_size // config.num_attention_heads)
            ),
            base=config.rope_theta,
            device=weights.device,
        )
        self.layers = nn.ModuleList(
            [
                FlashPhiLayer(
                    prefix,
                    layer_id,
                    config,
                    weights,
                    rotary_emb,
                )
                for layer_id in range(config.num_hidden_layers)
            ]
        )
        self.gradient_checkpointing = False

        # Attention geometry exposed for KV-cache allocation by the runtime.
        self.head_size = self.layers[0].self_attn.head_size
        self.num_heads = self.layers[0].self_attn.num_heads
        self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads

        # FIX(review): was hardcoded to "model.final_layernorm", which silently
        # broke loading whenever a non-default prefix was passed; identical to
        # the old behavior for the default prefix "model".
        self.norm = FastLayerNorm.load(
            prefix=f"{prefix}.final_layernorm",
            weights=weights,
            eps=config.layer_norm_eps,
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        slots: torch.Tensor,
        seqlen: Seqlen,
        hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
    ) -> torch.Tensor:
        if hpu_attention_meta is not None:
            hpu_attention_meta = set_block_mapping(
                hpu_attention_meta, input_ids.shape[0]
            )
        hidden_states = self.embed_tokens(input_ids)

        # Get rotary cos and sin for this forward
        # Avoid to index in each layer
        cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids)

        residual = None
        # In HPU lazy mode, flush the accumulated graph between layers.
        lazy_mode = htorch.utils.internal.is_lazy()
        if lazy_mode:
            htorch.core.mark_step()
        for i, layer in enumerate(self.layers):
            hidden_states, residual = layer(
                hidden_states,
                residual,
                cos,
                sin,
                cu_seqlen_prefill,
                kv_cache[i],
                slots,
                seqlen,
                hpu_attention_meta,
            )
            if lazy_mode:
                htorch.core.mark_step()

        hidden_states, _ = self.norm(hidden_states, residual)

        return hidden_states


class FlashPhiForCausalLM(torch.nn.Module):
    """Phi causal-LM head: FlashPhiModel backbone + (speculative) LM head."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()

        if not prefix:
            prefix = "model"
        else:
            prefix = f"{prefix}.model"

        self.model = FlashPhiModel(prefix, config, weights)
        self.lm_head = SpeculativeHead.load(
            config,
            prefix="lm_head",
            weights=weights,
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        slots: torch.Tensor,
        seqlen: Seqlen,
        hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
        lm_head_indices: Optional[torch.Tensor] = None,
        # Accepted for interface parity with other models; unused here.
        adapter_data: Optional[torch.Tensor] = None,
        # FIX(review): annotation was `-> torch.Tensor` but SpeculativeHead
        # returns (logits, speculative_logits), matching the other models.
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        hidden_states = self.model(
            input_ids,
            position_ids,
            cu_seqlen_prefill,
            kv_cache,
            slots,
            seqlen,
            hpu_attention_meta,
        )
        # During prefill only the last token of each sequence feeds the head.
        if lm_head_indices is not None:
            hidden_states = hidden_states[lm_head_indices]
        return self.lm_head(hidden_states)
text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_modeling.py", "repo_id": "text-generation-inference", "token_count": 7035 }
284
import torch
from PIL import Image
from io import BytesIO

from dataclasses import dataclass
from opentelemetry import trace
from typing import Iterable, Optional, Tuple, List, Type, Dict

from transformers import PreTrainedTokenizerBase
from transformers.image_processing_utils import select_best_resolution

from text_generation_server.pb import generate_pb2
from text_generation_server.models.flash_causal_lm import (
    FlashCausalLMBatch,
    FlashCausalLM,
    generate_block_metadata,
)
from text_generation_server.models.globals import PREFIX_CACHING, BLOCK_SIZE
from loguru import logger
from text_generation_server.utils.log import log_master
from transformers import AutoProcessor
from text_generation_server.layers.attention import (
    Seqlen,
    trim_seqlen_metadata,
    _async_h2d_tensor_copy,
    HPUPagedAttentionMetadata,
    trim_attn_metadata,
)
import habana_frameworks.torch as htorch
import time
from text_generation_server.utils.import_utils import (
    synchronize,
)
from vllm_hpu_extension.profiler import HabanaMemoryProfiler, format_bytes

tracer = trace.get_tracer(__name__)

# Placeholder tokens the various VLM prompt templates are built from.
IDEFICS2_FAKE_TOKEN = "<fake_token_around_image>"
IDEFICS2_IMAGE_TOKEN = "<image>"

IDEFICS3_IMAGE_TOKEN = "<image>"
IDEFICS3_FAKE_IMAGE_TOKEN = "<fake_token_around_image>"
IDEFICS3_GLOBAL_IMG_TOKEN = "<global-img>"


def prompt_split_image_llama4(aspect_ratio, num_patches_per_chunk):
    """Build the Llama-4 image placeholder string.

    Args:
        aspect_ratio: (ratio_h, ratio_w) tile grid of the preprocessed image.
        num_patches_per_chunk: number of <|patch|> tokens per tile.

    Returns:
        String with per-tile patch runs separated by tile separators, followed
        by the global-image patch run.
    """
    img_string = "<|image_start|>"
    ratio_h, ratio_w = aspect_ratio
    if ratio_h * ratio_w > 1:
        for yy in range(ratio_h):
            for xx in range(ratio_w):
                img_string += "<|patch|>" * num_patches_per_chunk
                if xx < ratio_w - 1:
                    img_string += "<|tile_x_separator|>"

            img_string += "<|tile_y_separator|>"
    img_string += "<|image|>"
    img_string += "<|patch|>" * num_patches_per_chunk
    img_string += "<|image_end|>"

    return img_string


# copied from: https://github.com/huggingface/transformers/blob/02ed609285c2448b3b54c31e362f2c389fa952ab/src/transformers/models/idefics3/processing_idefics3.py#L44-L60
def _prompt_split_image(
    *,
    image_seq_len: int,
    image_rows: int,
    image_cols: int,
    fake_token_around_image: str,
    image_token: str,
    global_img_token: str,
):
    """Prompt with expanded image tokens for when the image is split into patches."""
    text_split_images = ""
    for n_h in range(image_rows):
        for n_w in range(image_cols):
            text_split_images += (
                f"{fake_token_around_image}"
                + f"<row_{n_h + 1}_col_{n_w + 1}>"
                + f"{image_token}" * image_seq_len
            )
        text_split_images += "\n"

    # Trailing global-image block after the per-patch rows.
    text_split_images += (
        f"\n{fake_token_around_image}"
        + f"{global_img_token}"
        + f"{image_token}" * image_seq_len
        + f"{fake_token_around_image}"
    )
    return text_split_images


def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
    """
    Calculate the shape of the image patch grid after the preprocessing for images of any resolution.

    Args:
        image_size (`tuple`):
            The size of the input image in the format (height, width).
        grid_pinpoints (`List`):
            A list containing possible resolutions. Each item in the list should be a tuple or list
            of the form `(height, width)`.
        patch_size (`int`):
            The size of each image patch.

    Returns:
        tuple: The shape of the image patch grid in the format (width, height).
    """
    if not isinstance(grid_pinpoints, list):
        raise ValueError("grid_pinpoints should be a list of tuples or lists")

    height, width = select_best_resolution(image_size, grid_pinpoints)
    # NOTE(review): docstring says (width, height) but this returns
    # (height // patch, width // patch) — presumably bug-compatible with
    # upstream; confirm before changing.
    return height // patch_size, width // patch_size


def image_text_replacement(processor, image_input, config) -> Tuple[str, str]:
    """Return (placeholder_text, image_start_token) for one image, per model type.

    Raises:
        RuntimeError: for an unknown multimodal `config.model_type`.
    """
    if config.model_type == "idefics2":
        image_seq_len = 64
        image_str = f"{IDEFICS2_FAKE_TOKEN}{IDEFICS2_IMAGE_TOKEN * image_seq_len}{IDEFICS2_FAKE_TOKEN}"
        if processor.image_processor.do_image_splitting:
            # Splitting yields 4 sub-images + the original.
            image_str *= 5
        return image_str, IDEFICS2_FAKE_TOKEN
    if config.model_type == "idefics3":
        # TODO: implement this in a more general way
        n_rows = image_input["rows"][0][0]
        n_cols = image_input["cols"][0][0]
        image_seq_len = int(
            ((config.vision_config.image_size // config.vision_config.patch_size) ** 2)
            / (config.scale_factor**2)
        )
        image_str = _prompt_split_image(
            image_seq_len=image_seq_len,
            image_rows=n_rows,
            image_cols=n_cols,
            fake_token_around_image=IDEFICS3_FAKE_IMAGE_TOKEN,
            image_token=IDEFICS3_IMAGE_TOKEN,
            global_img_token=IDEFICS3_GLOBAL_IMG_TOKEN,
        )
        return image_str, IDEFICS3_FAKE_IMAGE_TOKEN
    elif config.model_type == "llava_next":
        height, width = image_input["image_sizes"][0]
        num_features = get_number_of_features(height, width, config)

        log_master(
            logger.info,
            f"Found {num_features} features in image of resolution {height}x{width}",
        )
        return "<image>" * num_features, "<image>"

    elif config.model_type == "paligemma":
        return "<image>" * config.text_config.num_image_tokens, "<image>"
    elif config.model_type == "qwen2_vl":
        grid_t, grid_h, grid_w = image_input["image_grid_thw"][0]
        # // 4 reflects the 2x2 spatial merge of vision patches.
        num_pads = grid_t * grid_h * grid_w // 4
        padding = "<|image_pad|>" * num_pads
        return f"<|vision_start|>{padding}<|vision_end|>", "<|vision_start|>"
    elif config.model_type == "qwen2_5_vl":
        grid_t, grid_h, grid_w = image_input["image_grid_thw"][0]
        num_pads = grid_t * grid_h * grid_w // 4
        padding = "<|image_pad|>" * num_pads
        return f"<|vision_start|>{padding}<|vision_end|>", "<|vision_start|>"
    elif config.model_type == "gemma3":
        # TODO: get correct number of features via reviewing the Gemma3 architecture
        # and calculating the number of image tokens
        num_pads = 256
        padding = "<image_soft_token>" * num_pads
        return f"\n\n<start_of_image>{padding}<end_of_image>\n\n", "<start_of_image>"
    elif config.model_type == "llama4":
        patch_size = config.vision_config.patch_size
        pixel_shuffle_ratio = config.vision_config.pixel_shuffle_ratio
        downsample_ratio = int(round(1.0 / (pixel_shuffle_ratio**2)))
        aspect_ratios = image_input["aspect_ratios"][0]
        image_height, image_width = image_input["pixel_values"][0].shape[-2:]

        num_patches_per_chunk = int(
            (image_height // patch_size)
            * (image_width // patch_size)
            // downsample_ratio
        )
        tokens_for_this_image = prompt_split_image_llama4(
            aspect_ratios, num_patches_per_chunk
        )

        return tokens_for_this_image, "<|image_start|>"
    else:
        raise RuntimeError(f"Unknown config {config.model_type} for multimodal")


def image_text_replacement_fixup(config, text: str) -> str:
    """Collapse doubled idefics2 fake tokens created by adjacent images."""
    if config.model_type == "idefics2":
        return text.replace(
            f"{IDEFICS2_FAKE_TOKEN}{IDEFICS2_FAKE_TOKEN}", IDEFICS2_FAKE_TOKEN
        )
    return text


def preprocess_text(config, text: str) -> str:
    """Model-specific text chunk adjustments before tokenization."""
    if config.model_type == "paligemma":
        return "<bos>" + text + "\n"
    return text


def preprocess_image(config, img):
    """Model-specific image adjustments before the image processor runs."""
    model_type = config.model_type

    # Qwen VL processors reject images that are too small; upscale them.
    if model_type in {"qwen2_vl", "qwen2_5_vl"} and img.width <= 20:
        img = img.resize((img.width * 2, img.height * 2))
    if model_type == "paligemma":
        img = img.convert("RGB")

    if model_type not in {"llava_next", "gemma3", "llama4"}:
        # TODO: check if this is needed
        img = [img]

    return img


def get_unpadded_features(
    original_height: int,
    original_width: int,
    npatches: int,
    num_patch_height: int,
    num_patch_width: int,
) -> Tuple[int, int]:
    """Return (unpadded_features, newline_features) for a llava-next image,
    removing the rows/columns that are pure aspect-ratio padding."""
    current_height = npatches * num_patch_height
    current_width = npatches * num_patch_width

    aspect_ratio: float = original_width / original_height
    current_aspect_ratio: float = current_width / current_height

    if aspect_ratio > current_aspect_ratio:
        # Image is wider than the grid: vertical padding is trimmed.
        new_height = (original_height * current_width) // original_width
        padding = (current_height - new_height) // 2
        current_height = current_height - (2 * padding)
    else:
        # Image is taller than the grid: horizontal padding is trimmed.
        new_width = (original_width * current_height) // original_height
        padding = (current_width - new_width) // 2
        current_width = current_width - (2 * padding)

    unpadded_features = current_height * current_width
    # One newline token per remaining row.
    newline_features = current_height
    return (unpadded_features, newline_features)


def get_number_of_features(height: int, width: int, config) -> int:
    """Total llava-next feature count for an image: unpadded + newlines + base."""
    # From config
    # Hardcoded for CLIP for now
    # image_grid_pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
    image_grid_pinpoints = config.image_grid_pinpoints
    image_size = config.vision_config.image_size
    patch_size = config.vision_config.patch_size

    assert image_size % patch_size == 0

    npatches = image_size // patch_size

    # Dimensions are intentionally swapped to be bug-compatible with
    # upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59
    num_patch_width, num_patch_height = get_anyres_image_grid_shape(
        [height, width],
        image_grid_pinpoints,
        image_size,
    )

    unpadded_features, newline_features = get_unpadded_features(
        height, width, npatches, num_patch_height, num_patch_width
    )
    # The base patch covers the entire image
    base_features = npatches**2
    return unpadded_features + newline_features + base_features


def scatter_image_embeds(
    embeds: torch.Tensor, is_embed: Optional[torch.Tensor]
) -> torch.Tensor:
    """Expand `embeds` to the full placeholder length, NaN-filling the
    positions that are not real embeddings (inverse of gather_image_embeds)."""
    if is_embed is None:
        return embeds

    placeholders = embeds.new_full(
        (is_embed.shape[0], embeds.shape[-1]),
        fill_value=torch.nan,
    )
    placeholders[is_embed.to(embeds.device)] = embeds
    return placeholders


def gather_image_embeds(
    embeds: torch.Tensor, is_embed: Optional[torch.Tensor]
) -> Optional[torch.Tensor]:
    """Select only the real embedding rows; None if nothing is selected."""
    if is_embed is None:
        return embeds
    sel = embeds[is_embed.to(embeds.device)]
    return sel if sel.numel() else None


@dataclass
class ImagePositions:
    # Token offset of the image placeholder run within the request's input_ids.
    offset: int
    # Total number of placeholder-run tokens (including surrounding markers).
    length: int
    # Per-request image index.
    id: int
    # Count of tokens that are actual image-embedding slots.
    num_placeholder_tokens: int
    # Boolean mask over the run marking embedding slots; None when all
    # `length` tokens are embedding slots.
    is_embed: Optional[torch.Tensor] = None


class FlashVlmCausalLMBatch(FlashCausalLMBatch):
    """FlashCausalLMBatch extended with per-request image inputs, placeholder
    positions and a per-request encoder-output cache."""

    image_inputs: Optional[List[List[Dict[str, torch.Tensor]]]]
    image_positions: Optional[List[List[ImagePositions]]]
    encoder_cache: Optional[List[Dict[int, torch.Tensor]]]
    pixel_values: Optional[List[torch.Tensor]]
    pixel_attention_mask: Optional[List[torch.Tensor]]
    image_sizes: Optional[List[Tuple[int, int]]]
    image_grid_thw: Optional[torch.Tensor]
    cache_entries_to_free: List[Tuple[int, int]]
    has_image_inputs: bool = False
    inputs_embeds: Optional[torch.Tensor] = None

    @classmethod
    @tracer.start_as_current_span("concatenate")
    def concatenate(cls, batches, padded_total_bs: int = 0):
        """Concatenate batches, carrying over per-request image state."""
        batch = super(FlashVlmCausalLMBatch, cls).concatenate(batches, padded_total_bs)
        batch.image_inputs = []
        batch.image_positions = []
        batch.encoder_cache = []
        for b in batches:
            if b.image_inputs is not None:
                batch.image_inputs.extend(b.image_inputs)
            else:
                batch.image_inputs.append(None)
            if b.image_positions is not None:
                batch.image_positions.extend(b.image_positions)
            else:
                batch.image_positions.append(None)
            if b.encoder_cache is not None:
                batch.encoder_cache.extend(b.encoder_cache)
            else:
                batch.encoder_cache.append(None)

        batch.pixel_values = None
        batch.pixel_attention_mask = None
        batch.image_sizes = None
        batch.image_grid_thw = None
        batch.inputs_embeds = None

        # To be filled in prepare_for_prefill
        batch.has_image_inputs = False
        batch.cache_entries_to_free = []
        return batch

    @tracer.start_as_current_span("filter")
    def filter(self, request_ids: List[int]):
        """Keep only the given requests, preserving their image state."""
        if len(request_ids) == 0:
            raise ValueError("Batch must have at least one request")

        image_inputs = []
        image_positions = []
        encoder_cache = []

        for request_id in request_ids:
            idx = self.requests_idx_mapping[request_id]
            image_inputs.append(self.image_inputs[idx])
            image_positions.append(self.image_positions[idx])
            encoder_cache.append(self.encoder_cache[idx])

        batch = super().filter(request_ids)
        batch.pixel_values = None
        batch.pixel_attention_mask = None
        batch.image_sizes = None
        batch.image_grid_thw = None
        batch.inputs_embeds = None

        batch.image_inputs = image_inputs
        batch.image_positions = image_positions
        batch.encoder_cache = encoder_cache

        # To be filled in prepare_for_prefill
        batch.has_image_inputs = False
        batch.cache_entries_to_free = []
        return batch

    @classmethod
    def batch_tokenized_inputs(
        cls, requests: Iterable[generate_pb2.Request], tokenizer, processor, config
    ):
        """Tokenize requests, expanding each image chunk into its placeholder
        text; returns (input_ids, image_inputs, image_positions) per request."""
        kwargs = {}
        if (
            hasattr(processor, "image_processor_class")
            and processor.image_processor_class == "Idefics3ImageProcessor"
        ):
            kwargs["return_row_col_info"] = True

        max_length = 0
        vocab = tokenizer.get_vocab()

        if not hasattr(config, "image_token_index"):
            config.image_token_index = config.image_token_id

        batch_tokenized_inputs: List[List[int]] = []
        batch_image_inputs: List[Optional[List[dict]]] = []
        batch_image_positions: List[Optional[List[ImagePositions]]] = []
        for r in requests:
            text_parts = []
            image_inputs = []
            image_texts = []

            image_id = 0

            for chunk in r.input_chunks.chunks:
                chunk_type = chunk.WhichOneof("chunk")
                if chunk_type == "text":
                    text = preprocess_text(config, chunk.text)
                    text_parts.append(text)
                elif chunk_type == "image":
                    img = Image.open(BytesIO(chunk.image.data))
                    img = preprocess_image(config, img)

                    image_input = processor.image_processor(
                        [img], return_tensors="pt", **kwargs
                    )
                    image_inputs.append(image_input)

                    img_text, img_start_token_str = image_text_replacement(
                        processor, image_input, config
                    )
                    text_parts.append(img_text)

                    # [image_id, start_token, placeholder_text]; mutable list so
                    # get_image_positions can trim idefics2 follow-up entries.
                    image_texts.append([image_id, img_start_token_str, img_text])
                    image_id += 1
                else:
                    raise RuntimeError(f"Invalid chunk type {chunk_type}")

            full_text = image_text_replacement_fixup(config, "".join(text_parts))
            input_ids = tokenizer(
                full_text,
                truncation=True,
                max_length=r.truncate,
                add_special_tokens=(
                    r.add_special_tokens if config.model_type != "paligemma" else False
                ),
            )["input_ids"]
            max_length = max(max_length, len(input_ids))

            if len(image_inputs) > 0:
                img_start_token = vocab[image_texts[0][1]]
                image_positions = cls.get_image_positions(
                    input_ids, image_texts, img_start_token, config, tokenizer
                )
            else:
                image_inputs = None
                image_positions = None

            batch_tokenized_inputs.append(input_ids)
            batch_image_inputs.append(image_inputs)
            batch_image_positions.append(image_positions)

        return batch_tokenized_inputs, batch_image_inputs, batch_image_positions

    @classmethod
    def get_image_positions(
        cls,
        input_ids: List[int],
        image_texts: List[Tuple[int, str, str]],
        img_start_token: int,
        config,
        tokenizer: PreTrainedTokenizerBase,
    ) -> List[ImagePositions]:
        """Locate each image's placeholder-token run inside `input_ids` and
        build its ImagePositions record."""
        image_positions = []
        num_images = len(image_texts)

        input_ids_t = torch.as_tensor(input_ids)
        # Candidate start offsets: every occurrence of the image start token.
        img_start_token_pos = torch.where(input_ids_t.eq(img_start_token))[0]
        num_tokens = input_ids_t.numel()

        last_pos = 0
        for i in range(num_images):
            image_id, img_start_token_str, img_text = image_texts[i]
            img_text = image_text_replacement_fixup(config, img_text)

            if config.model_type == "gemma3":
                img_text = img_text.replace("\n\n", "")

            tokens = tokenizer(img_text, add_special_tokens=False, return_tensors="pt")[
                "input_ids"
            ][0]
            length = tokens.numel()
            assert (
                length <= num_tokens
            ), f"{length} > {num_tokens} Image is truncated, try increasing --max-batch-prefill-tokens"

            # First start-token occurrence at or after the previous image end.
            pos = torch.searchsorted(img_start_token_pos, last_pos, right=False)
            index = img_start_token_pos[pos]
            assert torch.equal(
                input_ids_t[index : index + length], tokens
            ), "Image tokens not found in input_ids"

            is_embed = tokens == config.image_token_index
            num_placeholder_tokens = int(is_embed.sum())
            # Drop the mask when the whole run is embedding slots.
            if num_placeholder_tokens == length:
                is_embed = None

            pos = ImagePositions(
                offset=index,
                length=length,
                id=image_id,
                num_placeholder_tokens=num_placeholder_tokens,
                is_embed=is_embed,
            )

            image_positions.append(pos)
            last_pos = index + length

            if (
                config.model_type == "idefics2"
                and i + 1 != num_images
                and input_ids[last_pos] == config.image_token_index
            ):
                # Adjacent idefics2 images share a collapsed fake token (see
                # image_text_replacement_fixup); shift the next start position
                # and trim the duplicated start token off the next image text.
                fake_token = last_pos - 1
                fake_token_index = torch.searchsorted(
                    img_start_token_pos, fake_token, right=False
                )
                img_start_token_pos[fake_token_index] = last_pos
                image_texts[i + 1][2] = image_texts[i + 1][2][
                    len(img_start_token_str) :
                ]

        return image_positions

    @classmethod
    def from_pb_processor(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        processor,
        config,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "FlashVlmCausalLMBatch":
        """Build a batch from a protobuf request batch, running the processor."""
        batch_tokenized_inputs, image_inputs, image_positions = (
            cls.batch_tokenized_inputs(pb.requests, tokenizer, processor, config)
        )
        batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
        batch.image_inputs = image_inputs
        batch.image_positions = image_positions
        batch.encoder_cache = [{} for _ in range(len(pb.requests))]
        if len(image_inputs):
            batch.pixel_values = None
            batch.pixel_attention_mask = None
            batch.image_sizes = None
            batch.image_grid_thw = None
        return batch

    def prepare_for_prefill(
        self, max_padded_input_len, max_padded_bs, max_total_tokens, pad_token_id
    ):
        """Select which images still need vision encoding for this prefill
        step and stage their inputs in `self.pixel_values`."""
        super().prepare_for_prefill(
            max_padded_input_len, max_padded_bs, max_total_tokens, pad_token_id
        )

        self.has_image_inputs = False
        self.cache_entries_to_free = []

        self.pixel_values = []

        assert (
            len(self.cache_lengths)
            == len(self.input_lengths)
            == len(self.prefilling_mask)
        ), "Mismatch in lengths of cache_lengths, input_lengths, and prefilling_mask"

        for i, (
            cache_length,
            input_length,
            request_prefilling,
        ) in enumerate(
            zip(
                self.cache_lengths,
                self.input_lengths,
                self.prefilling_mask,
            )
        ):
            if not request_prefilling or self.image_positions[i] is None:
                continue

            for image_position in self.image_positions[i]:
                if image_position is None:
                    continue
                start_pos = image_position.offset
                length = image_position.length

                if start_pos >= cache_length + input_length:
                    # No encoder input required at this step
                    break
                if start_pos + length <= cache_length:
                    # The encode input is already processed
                    continue

                self.has_image_inputs = True

                if image_position.id not in self.encoder_cache[i]:
                    image_inputs = self.image_inputs[i][image_position.id]
                    self.pixel_values.append((i, image_position.id, image_inputs))

                    # Remove the image from the image_inputs
                    self.image_inputs[i][image_position.id] = None

        if not self.has_image_inputs:
            self.pixel_values = None
            self.pixel_attention_mask = None
            self.image_sizes = None
            self.image_grid_thw = None
        else:
            image_grid_thw_list = [
                x[2]["image_grid_thw"]
                for x in self.pixel_values
                if "image_grid_thw" in x[2]
            ]
            if image_grid_thw_list:
                self.image_grid_thw = torch.cat(image_grid_thw_list, dim=0)
            else:
                self.image_grid_thw = None

    def update_encoder_cache(self, encoder_outputs, request_id, img_pos):
        # Store encoder outputs expanded to placeholder length so later
        # slicing by token position works (see gather_vision_embeds).
        self.encoder_cache[request_id][img_pos.id] = scatter_image_embeds(
            encoder_outputs, img_pos.is_embed
        )

    def gather_vision_embeds(self):
        """Collect, for every request currently prefilling, the slice of cached
        vision embeddings that falls inside this step's token window."""
        device = self.input_ids.device
        chunks = []
        for (
            i,
            cache_length,
            input_length,
            request_prefilling,
        ) in zip(
            range(len(self.requests)),
            self.cache_lengths,
            self.input_lengths,
            self.prefilling_mask,
        ):
            if not request_prefilling or self.image_positions[i] is None:
                continue

            for image_position in self.image_positions[i]:
                if image_position is None:
                    continue
                start_pos = image_position.offset
                length = image_position.length

                if start_pos >= cache_length + input_length:
                    # No encoder input required at this step
                    break
                if start_pos + length <= cache_length:
                    # The encode input is already processed
                    continue
                start_idx = max(cache_length - start_pos, 0)
                end_idx = min(cache_length - start_pos + input_length, length)

                assert (
                    image_position.id in self.encoder_cache[i]
                ), f"image_id {image_position.id} not in encoder_cache {self.encoder_cache[i]}"
                encoder_output = self.encoder_cache[i][image_position.id]

                is_embed = image_position.is_embed
                if is_embed is not None:
                    is_embed = is_embed[start_idx:end_idx]
                # NOTE(review): re-import of `logger` (already imported at module
                # level) and unconditional info log look like leftover debug
                # logging — consider removing.
                from loguru import logger

                logger.info(
                    f"image_id {image_position.id} start_idx {start_idx} end_idx {end_idx}, length {length}"
                )

                embeds = gather_image_embeds(
                    encoder_output[start_idx:end_idx],
                    is_embed=is_embed,
                )
                if embeds is not None:
                    chunks.append(embeds)

                if end_idx == length:
                    # Image fully consumed: schedule its cache entry for release.
                    self.cache_entries_to_free.append((i, image_position.id))
                    self.image_positions[i][image_position.id] = None

        if len(chunks) == 0:
            return None
        return torch.cat(chunks, dim=0).to(device)

    def free_encoder_cache(self):
        # Release encoder outputs for images fully consumed this step.
        for i, image_id in self.cache_entries_to_free:
            self.encoder_cache[i].pop(image_id, None)

        self.cache_entries_to_free = []


class FlashVlmCausalLM(FlashCausalLM):
    """FlashCausalLM wrapper that adds a HF processor and VLM batch handling.

    NOTE(review): this class is truncated in the visible chunk; the remainder
    continues past this view.
    """

    def __init__(
        self,
        model_id: str,
        *,
        processor_class=AutoProcessor,
        processor_kwargs=None,
        batch_class=FlashVlmCausalLMBatch,
        revision,
        trust_remote_code: bool,
        support_chunking: bool = False,
        **kwargs,
    ):
        if PREFIX_CACHING:
            raise NotImplementedError("Vlm do not work with prefix caching yet")
        if processor_kwargs is None:
            processor_kwargs = {}
        self.processor = processor_class.from_pretrained(
            model_id,
            revision=revision,
            trust_remote_code=trust_remote_code,
            **processor_kwargs,
        )
        self.batch_class = batch_class
        super().__init__(
            model_id=model_id,
            revision=revision,
            trust_remote_code=trust_remote_code,
            support_chunking=support_chunking,
            **kwargs,
        )

    @property
    def batch_type(self) -> Type[FlashVlmCausalLMBatch]:
        return self.batch_class

    def max_past(self) -> Optional[int]:
        return getattr(self.model.text_model, "max_past", None)

    def warmup_decode(
        self, batch_size: int, block_num: int, batch: FlashVlmCausalLMBatch
    ):
        """Run one synthetic decode step to warm up HPU graphs for the given
        (batch_size, block_num) bucket."""
        input_ids = torch.zeros(batch_size, dtype=batch.input_ids.dtype)
        position_ids = torch.arange(batch_size, dtype=batch.position_ids.dtype)
        if batch.position_ids is not None and batch.position_ids.dim() == 2:
            # qwen2_vl and qwen2_5_vl case
            position_ids = position_ids.unsqueeze(-1).repeat(
                (1, batch.position_ids.shape[-1])
            )
        # Spread the blocks as evenly as possible over the warmup requests.
        blocks = [block_num // batch_size for _ in range(batch_size)]
        blocks[0] += block_num % batch_size
        block_tables = []
        slots = []
        start_idx = 0
        slot_indices = []

        # fetch the last blocked to warmup block num
        for i in range(batch_size):
            block_array = list(range(start_idx, start_idx + blocks[i]))
            slots.append(BLOCK_SIZE * block_array[-1] + BLOCK_SIZE - 1)
block_tables.append(block_array) slot_indices.append((start_idx + blocks[i]) * BLOCK_SIZE - 1) start_idx += blocks[i] input_lengths = torch.ones(batch_size, dtype=torch.int32) seqlen = Seqlen( input_lengths=_async_h2d_tensor_copy(input_lengths), ) block_list, block_groups, block_usage, _, block_bucket_size = ( generate_block_metadata( self.dtype, self.use_contiguous_pa, slots, block_tables, self.bucketing_ctx, ) ) meta = HPUPagedAttentionMetadata( block_list=_async_h2d_tensor_copy(block_list), block_groups=_async_h2d_tensor_copy(block_groups), block_usage=_async_h2d_tensor_copy(block_usage), block_mapping=None, attn_bias=None, ) if self.sliding_window is not None: block_tables_in_window = [] for i, bt in enumerate(block_tables): block_num_in_window = ( self.sliding_window + BLOCK_SIZE - 1 ) // BLOCK_SIZE block_tables_in_window.append( bt[max(0, blocks[i] - block_num_in_window) : blocks[i]] ) slots_in_window = [] start_idx = 0 for i, indice in enumerate(slot_indices): mask = ( indice - torch.arange(start_idx, indice + 1) ) < self.sliding_window slots_in_window.append(torch.arange(start_idx, indice + 1)[mask]) start_idx += blocks[i] * BLOCK_SIZE slots_in_window = torch.cat(slots_in_window, dim=0) ( block_list_in_window, block_groups_in_window, block_usage_in_window, slots_in_window_mask, _, ) = generate_block_metadata( self.dtype, self.use_contiguous_pa, slots, block_tables_in_window, self.bucketing_ctx, slots_in_window, block_bucket_size, ) meta.block_list_in_window = _async_h2d_tensor_copy(block_list_in_window) meta.block_groups_in_window = _async_h2d_tensor_copy(block_groups_in_window) meta.block_usage_in_window = _async_h2d_tensor_copy(block_usage_in_window) meta.slots_in_window_mask = _async_h2d_tensor_copy(slots_in_window_mask) hpu_attention_meta = trim_attn_metadata(meta) slots_tensor = torch.tensor(slots, dtype=batch.slots.dtype) inputs_embeds = self.get_inputs_embeds( input_ids=input_ids.to(self.device), ) # We pass a `cu_seqlen_prefill` in order not to have 
to deal with paged attention cache allocation/deallocation. self.model.forward( inputs_embeds=inputs_embeds, position_ids=_async_h2d_tensor_copy(position_ids), cu_seqlen_prefill=None, kv_cache=self.kv_cache, slots=_async_h2d_tensor_copy(slots_tensor), seqlen=trim_seqlen_metadata(seqlen), hpu_attention_meta=hpu_attention_meta, lm_head_indices=None, attention_mask=None, ) def warmup_hpu_graph(self, batch: FlashVlmCausalLMBatch): free_mem = HabanaMemoryProfiler.current_free_device_memory() graph_free_mem = free_mem - self.mem_reserved graph_free_mem = self.align_workers( graph_free_mem, torch.distributed.ReduceOp.MIN ) decode_available_memory = graph_free_mem msg = ( f"Using {format_bytes(graph_free_mem)}" f"/{format_bytes(free_mem)} " "of free device memory for HPUGraphs, " f"{format_bytes(decode_available_memory)} for decode " ) log_master(logger.info, msg) start_time = time.time() warmup_shape_count = 0 warmup_times = 3 # only warmup decode, for prefill, image pixal size may change, make the warmup useless def ordering_function_max_bs(b): return (-b[0], b[1]) self.bucketing_ctx.generate_decode_buckets(self.bucketing_ctx.num_hpu_blocks) buckets = list( sorted(self.bucketing_ctx.decode_buckets, key=ordering_function_max_bs) ) total_batch_seq = 0.001 total_mem = 0 available_mem = decode_available_memory log_master( logger.info, f"Decode batch size list:{[bsz[0] for bsz in buckets]}\n" ) for i, (batch_size, block_num) in enumerate(buckets): if batch_size > block_num: continue # Graph memory usage is proportional to seq dimension in a batch batch_seq = batch_size mem_estimate = batch_seq / total_batch_seq * total_mem graphed_bucket = (batch_size, block_num, False) if not mem_estimate >= available_mem: if graphed_bucket not in self.graphed_buckets: self.graphed_buckets.add(graphed_bucket) warmup_shape_count += 1 self.log_warmup(False, i, len(buckets), batch_size, block_num) with HabanaMemoryProfiler() as mem_prof: for index in range(warmup_times): 
self.warmup_decode(batch_size, block_num, batch) synchronize(self.device) used_mem = self.align_workers( mem_prof.consumed_device_memory, torch.distributed.ReduceOp.MAX ) if graphed_bucket in self.graphed_buckets: available_mem -= used_mem total_mem += used_mem total_batch_seq += batch_seq log_master(logger.info, "Decode warmup successful.\n") log_master( logger.info, f"warmup hpu graph time {int(time.time() - start_time)}s warmup shape count {warmup_shape_count}", ) def get_vision_embeds( self, pixel_values: torch.Tensor, pixel_attention_mask: torch.Tensor, image_sizes: torch.Tensor, image_grid_thw: torch.Tensor, ): embeds = self.model.get_vision_embeds( pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_sizes=image_sizes, image_grid_thw=image_grid_thw, ) return embeds def get_inputs_embeds( self, input_ids: torch.Tensor, vision_embeds: Optional[torch.Tensor] = None, ): return self.model.get_inputs_embeds( input_ids=input_ids, vision_embeds=vision_embeds, ) def encode_images(self, batch): if batch.pixel_values is not None: device = batch.input_ids.device for request_id, image_id, image_input in batch.pixel_values: pixel_values = image_input["pixel_values"].to(device) if "pixel_attention_mask" in image_input: pixel_attention_mask = image_input["pixel_attention_mask"].to( device ) else: pixel_attention_mask = None if "image_sizes" in image_input: image_sizes = image_input["image_sizes"].to(device) else: image_sizes = None if "image_grid_thw" in image_input: image_grid_thw = image_input["image_grid_thw"] else: image_grid_thw = None encoder_outputs = self.get_vision_embeds( pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_sizes=image_sizes, image_grid_thw=image_grid_thw, ) batch.update_encoder_cache( encoder_outputs, request_id, batch.image_positions[request_id][image_id], ) batch.pixel_values = None batch.pixel_attention_mask = None batch.image_sizes = None def set_inputs_embeds(self, batch): if 
batch.has_image_inputs: self.encode_images(batch) vision_embeds = batch.gather_vision_embeds() batch.has_image_inputs = False else: vision_embeds = None inputs_embeds = self.get_inputs_embeds( batch.input_ids, vision_embeds=vision_embeds ) batch.inputs_embeds = inputs_embeds def forward( self, batch: FlashVlmCausalLMBatch, adapter_data: Optional[Dict[str, torch.Tensor]] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # Model Forward if batch.speculative_ids is not None: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_current_length lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids B, speculative_length = speculative_ids.shape new_length = speculative_length + 1 new_input_ids = torch.cat( [input_ids.unsqueeze(-1), speculative_ids], dim=1 ).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) new_position_ids = ( position_ids.unsqueeze(-1).expand(B, new_length) + arange ).view(-1) slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) input_lengths = ( input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int ).view(-1) # Add Copy the block tables for all members block_tables = ( block_tables.unsqueeze(1) .expand(B, new_length, -1) .reshape(B * new_length, -1) .contiguous() ) max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: input_ids = batch.input_ids inputs_embeds = batch.inputs_embeds position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = self.kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_current_length 
lm_head_indices = batch.prefill_head_indices if self.model.config.model_type in {"qwen2_vl", "qwen2_5_vl"}: if position_ids.dim() == 1 and batch.prefilling: position_ids = self.model.get_position_ids( input_ids.cpu(), batch.image_grid_thw ) batch.position_ids = position_ids attention_mask = None attention_mask_forward = None if self.model.config.model_type == "llama4": attention_mask = (input_ids != self.tokenizer.pad_token_id).long() attention_mask_forward = attention_mask.view(input_lengths.shape[0], -1) if cu_seqlen_prefill is None and self.max_past() is not None: # In decode, not prefill, we're actually overwriting the KV-cache # in a circular buffer mode. # This makes sure the max_s for the decode pass is correct. max_s = min(self.max_past(), max_s) if batch.prefill_cache_indices is not None: slots_pad = torch.zeros_like(input_ids, device=slots.device) slots_pad[batch.prefill_cache_indices] = slots slots = slots_pad else: slots_pad = torch.zeros_like(input_ids, device=slots.device) slots_pad[: slots.shape[0]] = slots slots = slots_pad seqlen = Seqlen( input_lengths=_async_h2d_tensor_copy(input_lengths), ) kwargs = {} batch_size = input_lengths.shape[0] prompt_len = ( input_ids.shape[0] // batch_size if batch.prefilling else batch.hpu_attn_meta.block_list.shape[0] ) if htorch.utils.internal.is_lazy(): kwargs["bypass_hpu_graphs"] = not self.use_graphs( batch.prefilling, prompt_len, batch_size ) if self.sliding_window is not None: attn_mask = seqlen.make_sliding_window_bias( input_lengths.tolist(), self.sliding_window, self.dtype, prompt_len, batch_size, ) seqlen.attn_mask = _async_h2d_tensor_copy(attn_mask) logits, speculative_logits = self.model.forward( inputs_embeds=inputs_embeds, position_ids=_async_h2d_tensor_copy(position_ids), cu_seqlen_prefill=_async_h2d_tensor_copy(cu_seqlen_prefill), kv_cache=kv_cache, slots=_async_h2d_tensor_copy(slots), seqlen=trim_seqlen_metadata(seqlen), hpu_attention_meta=batch.hpu_attn_meta, 
lm_head_indices=_async_h2d_tensor_copy(lm_head_indices), attention_mask=attention_mask_forward, **kwargs, ) batch.image_grid_thw = None batch.free_encoder_cache() return logits, speculative_logits
text-generation-inference/backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/flash_vlm_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 20373 }
285
import torch def get_hpu_free_memory(device, memory_fraction): free_hpu_memory, _ = torch.hpu.mem_get_info() return free_hpu_memory def synchronize_hpu(device): torch.hpu.synchronize() def noop(*args, **kwargs): pass empty_cache = noop synchronize = synchronize_hpu get_free_memory = get_hpu_free_memory
text-generation-inference/backends/gaudi/server/text_generation_server/utils/import_utils.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/utils/import_utils.py", "repo_id": "text-generation-inference", "token_count": 131 }
286
#!/bin/bash ldconfig 2>/dev/null || echo 'unable to refresh ld cache, not a big deal in most cases' # Check if --sharded argument is present in the command line arguments if [[ "$*" == *"--sharded true"* ]]; then echo 'setting PT_HPU_ENABLE_LAZY_COLLECTIVES=1 for sharding' export PT_HPU_ENABLE_LAZY_COLLECTIVES=1 fi text-generation-launcher $@
text-generation-inference/backends/gaudi/tgi-entrypoint.sh/0
{ "file_path": "text-generation-inference/backends/gaudi/tgi-entrypoint.sh", "repo_id": "text-generation-inference", "token_count": 127 }
287
from helpers import create_request from text_generation_server.generator import NeuronGenerator from text_generation_server.pb.generate_pb2 import Batch def test_decode(neuron_model_config): """Verify that a decoding for a single request generates the expected output.""" config_name = neuron_model_config["name"] neuron_model_path = neuron_model_config["neuron_model_path"] generator = NeuronGenerator.from_pretrained(neuron_model_path) for do_sample in [True, False]: mode = "sample" if do_sample else "greedy" print(f"{config_name}[{mode}]") _test_decode(config_name, generator, do_sample) generator.clear() def _test_decode(config_name, generator, do_sample): input_text = ( "It was a bright cold day in April, and the clocks were striking thirteen." ) max_new_tokens = 20 request = create_request( id=0, inputs=input_text, max_new_tokens=max_new_tokens, do_sample=do_sample ) max_length = generator.model.neuron_config.sequence_length batch = Batch(id=0, requests=[request], size=1, max_tokens=max_length) generations, next_batch = generator.prefill(batch) # We already generated one token: call decode max_new_tokens - 1 times for _ in range(max_new_tokens - 1): assert next_batch.size == 1 assert next_batch.max_tokens == max_length assert len(generations) == 1 assert len(generations[0].tokens.ids) == 1 generations, next_batch = generator.decode([next_batch]) assert next_batch is None assert len(generations) == 1 output = generations[0].generated_text assert output.generated_tokens == max_new_tokens assert output.finish_reason == 0 if do_sample: expected_text = { "llama": " I sat alone in the café", "qwen2": " The air was so still", "granite": "1984, George Orwell", }[config_name] assert expected_text in output.text else: print(output.text) expected_text = { "llama": " The world was holding its breath as the world's top scientists and engineers gathered at the secret underground facility", "qwen2": " I was sitting in my room, staring at the ceiling, when the door opened and in 
came a", "granite": "\n\nThis opening line from George Orwell's dystopian novel \"198", }[config_name] assert output.text == expected_text
text-generation-inference/backends/neuron/tests/server/test_decode.py/0
{ "file_path": "text-generation-inference/backends/neuron/tests/server/test_decode.py", "repo_id": "text-generation-inference", "token_count": 932 }
288
#ifndef TGI_BACKEND_TRTLLM #define TGI_BACKEND_TRTLLM #include <cmath> #include <cstdint> #include <expected> #include <fstream> #include <list> #include <span> #include <nlohmann/json.hpp> #include <spdlog/spdlog.h> #include <spdlog/fmt/fmt.h> #include <tensorrt_llm/executor/executor.h> namespace huggingface::tgi::backends::trtllm { namespace tle = tensorrt_llm::executor; using json = nlohmann::json; using request_id_t = uint64_t; using token_id_t = tle::TokenIdType; /** * Represent the parameters used for generation */ struct generation_params_t { uint32_t max_new_tokens; }; /** * Represent the parameters used to sample tokens from the logit distribution */ struct sampling_params_t { uint32_t top_k; float_t top_p; float_t repetition_penalty; float_t frequency_penalty; float_t temperature; uint64_t seed; constexpr explicit operator tle::SamplingConfig() const { return tle::SamplingConfig{ 1, top_k, top_p, std::nullopt, std::nullopt, std::nullopt, seed, temperature, std::nullopt, std::nullopt, repetition_penalty, std::nullopt, frequency_penalty, std::nullopt }; } }; /** * Represent possible values from transformers generation `generation_config.json`. * It usually stores default sampling parameters to use, such as top_p, temperature, etc. 
*/ struct generation_config_t { float_t top_p; float_t temperature; std::list<std::vector<int32_t>> stop_words; constexpr explicit generation_config_t(const json &config) : top_p(config.value("top_p", 1.0f)), temperature(config.value("temperature", 1.0f)), stop_words(0) { if (config.contains("/eos_token_id"_json_pointer) && config["/eos_token_id"_json_pointer].is_array()) { const auto &eos_token_id = config["/eos_token_id"_json_pointer]; std::for_each(eos_token_id.begin(), eos_token_id.end(), [this](const auto token_id) { stop_words.emplace_back(1, token_id.template get<int32_t>()); }); SPDLOG_DEBUG("Detected {:d} predefined stop_words from generation_config.json", stop_words.size()); } } }; /** * Helper class representing various items which are stored within the TensorRT-LLM engines folder and * can be retrieved at runtime */ class backend_workspace_t { private: constexpr static auto as_json = [](const std::filesystem::path &path) -> json { std::ifstream config_f(path); return json::parse(config_f); }; std::filesystem::path engines_folder_; std::filesystem::path executor_worker_path_; json config_; generation_config_t generation_config_; public: backend_workspace_t(std::filesystem::path &engines_folder, std::filesystem::path &executor_worker_path) : engines_folder_(engines_folder), executor_worker_path_(executor_worker_path), config_(as_json(engines_folder / "config.json")), generation_config_(as_json(engines_folder / "generation_config.json")) {}; backend_workspace_t(std::filesystem::path &&engines_folder, std::filesystem::path &&executor_worker_path) : engines_folder_(engines_folder), executor_worker_path_(executor_worker_path), config_(as_json(engines_folder / "config.json")), generation_config_(as_json(engines_folder / "generation_config.json")) {}; /** * Path to the folder containing the TensorRT-LLM engines * @return local filesystem path to the folder */ [[nodiscard]] constexpr std::filesystem::path engines_folder() const { return engines_folder_; } /** * 
Hugging Face transformers' generated `generation_config_t` mapping information stored in the * `generation_config.json` holding default generation parameters. * @return `generation_config_t` */ [[nodiscard]] constexpr const generation_config_t &generation_config() const { return generation_config_; } /** * Factory method returning new `tensorrt_llm::executor::ParallelConfig` instance used * to initialize `tensorrt_llm::executor::Executor` with multi-instance communication information * @return `tensorrt_llm::executor::ParallelConfig` instance */ [[nodiscard]] tle::ParallelConfig parallel_config() const; /** * Factory method returning new `tensorrt_llm::executor::ExecutorConfig` instance used * to initialize `tensorrt_llm::executor::Executor` * @return `tensorrt_llm::executor::ExecutorConfig` instance */ [[nodiscard]] tle::ExecutorConfig executor_config() const; }; /** * Error raised by the underlying backend implementation */ enum backend_error_t { EXECUTOR_NOT_READY = 3, EXECUTOR_SCHEDULING_FAILED = 4, }; /** * Actual TensorRT-LLM backend implementation interacting with TensorRT-LLM Executor service to * - schedule new request * - pull status of submitted request(s) * - cancel submitted request(s) */ class backend_t { private: backend_workspace_t workspace; tle::Executor executor_; public: backend_t(std::filesystem::path &engines_folder, std::filesystem::path &executor_worker_path); backend_t(std::filesystem::path &&engines_folder, std::filesystem::path &&executor_worker_path) : backend_t(engines_folder, executor_worker_path) {}; /** * Submit a new request to the executor * @param token_ids * @param generation_params * @param sampling_params * @return Either newly submitted request's id or the error why it failed to submit */ [[nodiscard("Discarded executor request_id needs to be assigned")]] std::expected<request_id_t, backend_error_t> submit(std::span<const token_id_t> token_ids, generation_params_t generation_params, sampling_params_t sampling_params) noexcept; 
/** * Query the number of tokens available across all in-flight generations * @return */ [[nodiscard("Pulling out the number of tokens")]] size_t num_tokens_ready() const noexcept; /** * Pull out newly generated tokens from the executor * @return */ [[nodiscard("")]] std::vector<tle::Response> pull_tokens() noexcept; /** * Cancel the specified request on the executor' set * @param request_id Request's Identifier to remove from the in-flight executor */ void cancel(request_id_t) noexcept; }; /** * Create a TensorRT-LLM executor from a workspace */ const auto executor_factory_initializer = [](const backend_workspace_t &workspace) -> tle::Executor { return {workspace.engines_folder(), tensorrt_llm::executor::ModelType::kDECODER_ONLY, workspace.executor_config()}; }; } /** * Helper structures to define formatting strategies for various types in the backend */ template<> struct fmt::formatter<huggingface::tgi::backends::trtllm::generation_params_t> : formatter<string_view> { auto format(huggingface::tgi::backends::trtllm::generation_params_t const &c, format_context &ctx) const -> format_context::iterator { return fmt::format_to(ctx.out(), "generation_params_t{{ max_new_tokens={:d} }}", c.max_new_tokens); } }; template<> struct fmt::formatter<huggingface::tgi::backends::trtllm::sampling_params_t> : formatter<string_view> { auto format(huggingface::tgi::backends::trtllm::sampling_params_t const &c, format_context &ctx) const -> format_context::iterator { return fmt::format_to( ctx.out(), "sampling_params_t{{ top_k={:d}, top_p={:.3f}, repetition_penalty={:.3f}, frequency_penalty={:.3f}, temperature={:.3f}, seed={:d} }}", c.top_k, c.top_p, c.repetition_penalty, c.frequency_penalty, c.temperature, c.seed ); } }; #endif
text-generation-inference/backends/trtllm/csrc/backend.hpp/0
{ "file_path": "text-generation-inference/backends/trtllm/csrc/backend.hpp", "repo_id": "text-generation-inference", "token_count": 3772 }
289
use crate::block_allocator::{Allocator, BlockAllocation}; use slotmap::{DefaultKey, SlotMap}; use std::hash::{Hash, Hasher}; use std::{ collections::{BTreeSet, HashMap}, sync::Arc, }; fn hash(slice: &[u32]) -> u64 { assert!(!slice.is_empty()); if slice.len() == 1 { slice[0] as u64 } else { let mut s = std::hash::DefaultHasher::new(); slice.hash(&mut s); s.finish() } } pub struct RadixAllocator { allocation_id: u64, allocations: HashMap<u64, RadixAllocation>, cache_blocks: RadixTrie, /// Blocks that are immediately available for allocation. free_blocks: Vec<u32>, #[allow(dead_code)] // This isn't used because the prefix need to match without the windowing // mecanism. This at worst is overallocating, not necessarily being wrong. window_size: Option<u32>, block_size: u32, } impl RadixAllocator { pub fn new(block_size: u32, n_blocks: u32, window_size: Option<u32>) -> Self { RadixAllocator { allocation_id: 0, allocations: HashMap::new(), cache_blocks: RadixTrie::new(block_size as usize), // Block 0 is reserved for health checks. free_blocks: (1..n_blocks).collect(), window_size, block_size, } } fn alloc_or_reclaim(&mut self, n_blocks_needed: usize) -> Option<Vec<u32>> { if self.free_blocks.len() < n_blocks_needed { // This is a bit annoying, we first extend the free list and then // split it off again below. This is because we need to put it on // the free list if we cannot allocate enough blocks. This is only // temporary, the trie needs to be able to report whether it can // allocate the requested amount. Just not implemented yet. 
tracing::debug!( "Free blocks {} need {n_blocks_needed}", self.free_blocks.len() ); self.free_blocks.extend( self.cache_blocks .evict(n_blocks_needed - self.free_blocks.len()), ); } if self.free_blocks.len() >= n_blocks_needed { Some( self.free_blocks .split_off(self.free_blocks.len() - n_blocks_needed), ) } else { None } } } // Allocator trait impl Allocator for RadixAllocator { fn allocate( &mut self, tokens: u32, prefill_tokens: Option<Arc<Vec<u32>>>, ) -> Option<BlockAllocation> { let mut blocks = vec![]; let prefix_node = if let Some(prefill_tokens) = prefill_tokens.as_ref() { let node_id = self .cache_blocks .find(prefill_tokens.as_slice(), &mut blocks); node_id } else { self.cache_blocks.root_id() }; // Even if this allocation fails below, we need to increase he // refcount to ensure that the prefix that was found is not evicted. self.cache_blocks .incref(prefix_node) .expect("Failed to increment refcount"); let prefix_len = blocks.len() * self.block_size as usize; let suffix_len = tokens - prefix_len as u32; let suffix_blocks = suffix_len.div_ceil(self.block_size); tracing::info!("Prefix {prefix_len} - Suffix {suffix_len}"); match self.alloc_or_reclaim(suffix_blocks as usize) { Some(suffix_blocks) => blocks.extend(suffix_blocks), None => { tracing::debug!("Cannot allocate {:?}", self.cache_blocks); tracing::debug!("Found {prefix_len} prefix tokens need {suffix_blocks} suffix blocks for {tokens} tokens"); tracing::debug!("Block size {}", self.block_size); self.cache_blocks .decref(prefix_node) .expect("Failed to decrement refcount"); return None; } } // 1:1 mapping of blocks and slots. 
let slots = if self.block_size == 1 { blocks.clone() } else { let mut slots = Vec::with_capacity(blocks.len() * self.block_size as usize); 'slots: for block_id in &blocks { for s in (block_id * self.block_size)..((block_id + 1) * self.block_size) { slots.push(s); if slots.len() as u32 == tokens { break 'slots; } } } slots }; let allocation = RadixAllocation { prefix_node, cached_prefix_len: prefix_len, prefill_tokens: prefill_tokens.clone(), }; self.allocation_id += 1; self.allocations.insert(self.allocation_id, allocation); Some(BlockAllocation { allocation_id: self.allocation_id, block_allocator: None, blocks, slots, prefix_len: prefix_len as u32, }) } fn free(&mut self, blocks: Vec<u32>, allocation_id: u64) { let allocation = match self.allocations.remove(&allocation_id) { Some(allocation) => allocation, None => unreachable!("Tried to free an unknown allocation."), }; self.cache_blocks .decref(allocation.prefix_node) .expect("Failed to decrement refcount"); if let Some(prefill_tokens) = allocation.prefill_tokens { let prefill_tokens = prefill_tokens.as_slice(); // If there are prefill tokens that did not come from the cache, // add them to the cache. if prefill_tokens.len() > allocation.cached_prefix_len { let aligned = (prefill_tokens.len() / self.block_size as usize) * self.block_size as usize; if aligned > 0 { let prefix_len = self .cache_blocks .insert( &prefill_tokens[..aligned], &blocks[..aligned / self.block_size as usize], ) // Unwrap, failing is a programming error. .expect("Failed to store prefill tokens"); // We can have a prefill with the following structure: // // |---| From the prefix cache. // A B C D E F G //|--------| Found in the trie during insertion. // // This means that while processing this request there was a // partially overlapping request that had A..=E in its // prefill. In this case we need to free the blocks D E. 
if prefix_len > allocation.cached_prefix_len { self.free_blocks.extend( &blocks[allocation.cached_prefix_len / self.block_size as usize ..prefix_len / self.block_size as usize], ); } } } // Free non-prefill blocks. self.free_blocks .extend(&blocks[prefill_tokens.len() / self.block_size as usize..]); } else { self.free_blocks.extend(blocks); } } } struct RadixAllocation { prefix_node: NodeId, cached_prefix_len: usize, prefill_tokens: Option<Arc<Vec<u32>>>, } // Radix trie that is heavily inspired by radix attention from sglang. // // The trie is optimized for prefix caching: // // - A normal radix trie stores discrete values. In this radix trie, // inserting *abc* with value *xyz* will also enable lookup for // *a* (*x*) and *ab* (*xy*). // - As a result, every value is required to have the same length as // the key. // - We store additional information in each node, such as last access // time and a reference count. #[derive(Debug)] pub enum TrieError { InvalidNodeId, RefCountUnderflow, } pub type NodeId = DefaultKey; #[derive(Debug)] pub struct RadixTrie { /// Identifier of the root nod. root: DefaultKey, /// Leave node identifiers ordered by increasing recency. leaves: BTreeSet<(u64, NodeId)>, /// All trie nodes. nodes: SlotMap<NodeId, TrieNode>, /// Time as a monotonically increating counter to avoid the system /// call that a real time lookup would require. time: u64, /// All blocks need to be aligned with this block_size: usize, } impl RadixTrie { /// Construct a new radix trie. pub fn new(block_size: usize) -> Self { let root = TrieNode::new(vec![], vec![], 0, None); let mut nodes = SlotMap::new(); let root = nodes.insert(root); RadixTrie { leaves: BTreeSet::new(), nodes, root, time: 0, block_size, } } /// Find the prefix of the given tokens. /// /// The blocks corresponding to the part of the prefix that could be found /// are written to `blocks`. The number of blocks is in `0..=tokens.len()`. 
/// Returns the identifier of the trie node that contains the longest /// prefix. The node identifier can be used by callers to e.g. increase its /// reference count. /// /// Using this method will update the access time of the traversed nodes. pub fn find(&mut self, key: &[u32], blocks: &mut Vec<u32>) -> NodeId { self.time += 1; self.find_(self.root, key, blocks) } /// Find worker. fn find_(&mut self, node_id: NodeId, key: &[u32], blocks: &mut Vec<u32>) -> NodeId { let node = &self.nodes[node_id]; if key.len() >= self.block_size { let node_key = hash(&key[..self.block_size]); if let Some(&child_id) = node.children.get(&node_key) { self.update_access_time(child_id); let child = self.nodes.get(child_id).expect("Invalid child identifier"); let shared_prefix_len = shared_prefix(&child.key, key, self.block_size); assert_eq!(shared_prefix_len % self.block_size, 0); blocks.extend(&child.blocks[..shared_prefix_len / self.block_size]); // A node represents the prefix of its children. So, only // recurse when there is a full prefix match. let key = &key[shared_prefix_len..]; if !key.is_empty() && shared_prefix_len == child.key.len() { return self.find_(child_id, key, blocks); } else { return child_id; } } } node_id } /// Decrease the reference count of a node. pub fn decref(&mut self, node_id: NodeId) -> Result<(), TrieError> { // We don't care about refcounting for root, since it will never // be evicted. if node_id == self.root { return Ok(()); } let node = self .nodes .get_mut(node_id) .ok_or(TrieError::InvalidNodeId)?; if node.ref_count == 0 { return Err(TrieError::RefCountUnderflow); } node.ref_count -= 1; if node.ref_count == 0 { assert!( node.children.is_empty(), "Nodes with children must have refcount > 0" ); self.leaves.insert((node.last_accessed, node_id)); } Ok(()) } /// Increase the reference count of a node. 
pub fn incref(&mut self, node_id: NodeId) -> Result<(), TrieError> { if node_id == self.root { return Ok(()); } let node = self .nodes .get_mut(node_id) .ok_or(TrieError::InvalidNodeId)?; if node.ref_count == 0 { self.leaves.remove(&(node.last_accessed, node_id)); } node.ref_count += 1; Ok(()) } /// Evict `n_blocks` from the trie. /// /// Returns the evicted blocks. When the length is less than `n_blocks`, /// not enough blocks could be evicted. pub fn evict(&mut self, n_blocks: usize) -> Vec<u32> { // NOTE: we don't return Result here. If any of the unwrapping fails, // it's a programming error in the trie implementation, not a user // error caused by e.g. an invalid argument. // TODO: add some bookkeeping in the future to check whether we can // evict n_blocks and return `None` if we can't. We are now needlessly // evicting prefixes from the cache in such a case. let mut evicted = Vec::new(); tracing::debug!("Evicting in search of {n_blocks}"); while let Some((last_access, node_id)) = self.leaves.pop_first() { let blocks_needed = n_blocks.saturating_sub(evicted.len()); tracing::debug!("Evicting node {node_id:?} "); let node = self.nodes.get(node_id).expect("Leave does not exist"); assert_eq!( node.ref_count, 0, "Leaf must have refcount of 0, got {}", node.ref_count ); if blocks_needed >= node.blocks.len() { // We need to evict the whole node if we need more blocks than it has. let node = self.remove_node(node_id); evicted.extend(node.blocks); if evicted.len() >= n_blocks { break; } } else { // The node has more blocks than needed, so we'll just remove // the required number of blocks and leave the remaining blocks // untouched. 
let node = self.nodes.get_mut(node_id).expect("Leave does not exist"); let truncate_blocks = node.blocks.len() - blocks_needed; let truncate_tokens = truncate_blocks * self.block_size; node.key.truncate(truncate_tokens); evicted.extend(node.blocks.split_off(truncate_blocks)); self.leaves.insert((last_access, node_id)); break; } } evicted } /// Insert a prefill along with its blocks. /// /// This method returns the length of the prefix that was already /// in the trie. E.g. if the length is 10, this means that for /// the first 10 elements of the tree **the blocks are not updated**. pub fn insert(&mut self, tokens: &[u32], blocks: &[u32]) -> Result<usize, TrieError> { self.time += 1; let common = self.insert_(self.root, tokens, blocks)?; Ok(common) } /// Insertion worker. fn insert_( &mut self, node_id: NodeId, tokens: &[u32], blocks: &[u32], ) -> Result<usize, TrieError> { // TODO: in the future we may want to check that the blocks match for // the part of the prefix that is already in the trie to detect // mismatches. assert_eq!(tokens.len(), blocks.len() * self.block_size); let node_key = hash(&tokens[..self.block_size]); if let Some(&child_id) = self.nodes[node_id].children.get(&node_key) { self.update_access_time(child_id); let child = self .nodes .get_mut(child_id) // Unwrap here, since failure is a bug. .expect("Child node does not exist"); let shared_prefix_len = shared_prefix(&child.key, tokens, self.block_size); // We are done, the prefix is already in the trie. if shared_prefix_len == tokens.len() || shared_prefix_len == 0 { return Ok(shared_prefix_len); } // The node's prefix is a prefix of the insertion prefix. if shared_prefix_len == child.key.len() { return Ok(shared_prefix_len + self.insert_( child_id, &tokens[shared_prefix_len..], &blocks[shared_prefix_len / self.block_size..], )?); } // The node's prefix and the insertion prefix only match partially, // split the node to just contain the matching part. 
Then insert the // remainder of the prefix into the node again let child_id = self.split_node(child_id, shared_prefix_len); let key = &tokens[shared_prefix_len..]; let blocks = &blocks[shared_prefix_len / self.block_size..]; Ok(shared_prefix_len + self.insert_(child_id, key, blocks)?) } else { self.add_node(node_id, tokens, blocks); Ok(0) } } fn split_node(&mut self, node_id: NodeId, prefix_len: usize) -> NodeId { // We have to make the current node a child to ensure that its // properties and node id stay the same. // This funcion unwraps, an invalid node_id is a programming error. let node = self .nodes .get_mut(node_id) .expect("Node to-be split does not exist"); let mut parent_key = node.key.split_off(prefix_len); let prefix_blocks = prefix_len / self.block_size; let mut parent_blocks = node.blocks.split_off(prefix_blocks); // Move first part of the prefix to the parent. We swap to avoid // an allocation + copy for both splits of the key/blocks. std::mem::swap(&mut node.key, &mut parent_key); std::mem::swap(&mut node.blocks, &mut parent_blocks); let node_key = hash(&node.key[..self.block_size]); let grandparent_id = node.parent.expect("Node does not have a parent"); let parent_id = self.add_node(grandparent_id, parent_key, parent_blocks); self.add_node_to_parent(parent_id, node_key, node_id); // Reborrow to make the borrow checker happy. let node = self .nodes .get_mut(node_id) .expect("Node to-be split does not exist"); node.parent = Some(parent_id); parent_id } /// Create a node and add it to the parent. fn add_node( &mut self, parent_id: NodeId, key: impl Into<Vec<u32>>, blocks: impl Into<Vec<u32>>, ) -> NodeId { let key = key.into(); let blocks = blocks.into(); let first = hash(&key[..self.block_size]); let child = TrieNode::new(key, blocks, self.time, Some(parent_id)); let child_id = self.nodes.insert(child); self.add_node_to_parent(parent_id, first, child_id); self.leaves.insert((self.time, child_id)); child_id } /// Add a node to the parent. 
fn add_node_to_parent(&mut self, parent_id: NodeId, hash: u64, child_id: NodeId) { // Unwrap here, passing in an unknown id is a programming error. let parent = self.nodes.get_mut(parent_id).expect("Unknown parent node"); if parent.children.insert(hash, child_id).is_none() { // Only increase reference count if child does not replace another child. self.incref(parent_id) .expect("Failed to increase parent refcount"); } } /// Remove a node from the trie. fn remove_node(&mut self, node_id: NodeId) -> TrieNode { // Unwrap here, passing in an unknown id is a programming error. let node = self.nodes.remove(node_id).expect("Unknown node"); assert!( node.children.is_empty(), "Tried to remove a node with {} children", node.children.len() ); let parent_id = node.parent.expect("Attempted to remove root node"); let parent = self.nodes.get_mut(parent_id).expect("Unknown parent node"); let node_key = hash(&node.key[..self.block_size]); parent.children.remove(&node_key); self.decref(parent_id) .expect("Failed to decrease parent refcount"); node } fn update_access_time(&mut self, node_id: NodeId) { // Unwrap here, passing in an unknown id is a programming error. let node = self.nodes.get_mut(node_id).expect("Unknown node"); // Update the ordered leaves set if the node is a leave. if self.leaves.remove(&(node.last_accessed, node_id)) { self.leaves.insert((self.time, node_id)); } node.last_accessed = self.time; } #[allow(dead_code)] #[doc(hidden)] /// Print debugging output for the trie. /// /// In contrast to `Debug` nicely formatted. 
pub fn print_debug(&self) { self.print_debug_(self.root, 0); } fn print_debug_(&self, node_id: NodeId, indent: usize) { let node = &self.nodes[node_id]; eprintln!( "{}{:?}, key: {:?}, blocks: {:?}, ref_count: {}, last_accessed: {}, parent: {:?}, children: {:?}", " ".repeat(indent), node_id, node.key, node.blocks, node.ref_count, node.last_accessed, node.parent, node.children ); for child_id in self.nodes[node_id].children.values() { self.print_debug_(*child_id, indent + 2); } } pub(crate) fn root_id(&self) -> DefaultKey { self.root } } /// Trie node. #[derive(Debug)] struct TrieNode { blocks: Vec<u32>, children: HashMap<u64, NodeId>, key: Vec<u32>, last_accessed: u64, parent: Option<NodeId>, ref_count: usize, } impl TrieNode { fn new(key: Vec<u32>, blocks: Vec<u32>, last_accessed: u64, parent: Option<NodeId>) -> Self { TrieNode { children: HashMap::new(), key, blocks, last_accessed, parent, ref_count: 0, } } } fn shared_prefix(left: &[u32], right: &[u32], block_size: usize) -> usize { let full = left.iter().zip(right).take_while(|(a, b)| a == b).count(); // NOTE: this is the case because the child node was chosen based on // matching the first character of the key/prefix. 
assert!(full > 0, "Prefixes must at least share 1 token"); (full / block_size) * block_size } #[cfg(test)] mod tests { use std::sync::Arc; use rand::{ distributions::Uniform, prelude::Distribution, rngs::SmallRng, seq::SliceRandom, SeedableRng, }; use rustc_hash::FxHashSet; use super::*; #[test] fn allocator_block_size() { let mut cache = RadixAllocator::new(2, 12, None); let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap(); assert_eq!(allocation.blocks, vec![8, 9, 10, 11]); assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22, 23]); assert_eq!(allocation.prefix_len, 0); cache.free(allocation.blocks.clone(), allocation.allocation_id); let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap(); assert_eq!(allocation.blocks, vec![8, 9, 10, 11]); assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22, 23]); assert_eq!(allocation.prefix_len, 4); } #[test] fn allocator_block_size_non_aligned() { let mut cache = RadixAllocator::new(2, 12, None); let allocation = cache.allocate(7, Some(Arc::new(vec![0, 1, 2]))).unwrap(); assert_eq!(allocation.blocks, vec![8, 9, 10, 11]); assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22]); assert_eq!(allocation.prefix_len, 0); cache.free(allocation.blocks.clone(), allocation.allocation_id); let allocation = cache.allocate(7, Some(Arc::new(vec![0, 1, 2]))).unwrap(); assert_eq!(allocation.blocks, vec![8, 9, 10, 11]); assert_eq!(allocation.slots, vec![16, 17, 18, 19, 20, 21, 22]); assert_eq!(allocation.prefix_len, 2); } #[test] fn allocator_reuses_prefixes() { let mut cache = RadixAllocator::new(1, 12, None); let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap(); assert_eq!(allocation.blocks, vec![4, 5, 6, 7, 8, 9, 10, 11]); assert_eq!(allocation.blocks, allocation.slots); assert_eq!(allocation.prefix_len, 0); cache.free(allocation.blocks.clone(), allocation.allocation_id); let allocation = cache.allocate(8, Some(Arc::new(vec![0, 1, 2, 
3]))).unwrap(); assert_eq!(allocation.blocks, vec![4, 5, 6, 7, 8, 9, 10, 11]); assert_eq!(allocation.prefix_len, 4); } #[test] fn allocator_collects_older_prefixes_first() { let mut cache = RadixAllocator::new(1, 7, None); let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap(); assert_eq!(allocation1.blocks, vec![3, 4, 5, 6]); assert_eq!(allocation1.prefix_len, 0); let allocation2 = cache.allocate(2, Some(Arc::new(vec![4, 5]))).unwrap(); assert_eq!(allocation2.blocks, vec![1, 2]); assert_eq!(allocation2.prefix_len, 0); cache.free(allocation1.blocks.clone(), allocation1.allocation_id); cache.free(allocation2.blocks.clone(), allocation2.allocation_id); // We should get the blocks of the first allocation, since they are more recent. let allocation3 = cache.allocate(4, Some(Arc::new(vec![6, 7, 8, 9]))).unwrap(); assert_eq!(allocation3.blocks, vec![3, 4, 5, 6]); assert_eq!(allocation3.prefix_len, 0); } #[test] fn allocator_frees_fully_overlapping_prefills() { let mut cache = RadixAllocator::new(1, 10, None); let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap(); let allocation2 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap(); cache.free(allocation2.blocks.clone(), allocation2.allocation_id); cache.free(allocation1.blocks.clone(), allocation1.allocation_id); let allocation3 = cache.allocate(4, Some(Arc::new(vec![0, 1, 2, 3]))).unwrap(); assert_eq!(allocation3.prefix_len, 4); // 10 blocks, of which 1 reserved for health checks, 4 for the cached blocks. 
assert_eq!(cache.free_blocks.len(), 5); } #[test] fn allocator_frees_partially_overlapping_prefills() { let mut cache = RadixAllocator::new(1, 20, None); let allocation1 = cache.allocate(4, Some(Arc::new(vec![0, 1]))).unwrap(); assert_eq!(allocation1.blocks, vec![16, 17, 18, 19]); assert_eq!(allocation1.prefix_len, 0); cache.free(allocation1.blocks.clone(), allocation1.allocation_id); let allocation2 = cache .allocate(8, Some(Arc::new(vec![0, 1, 2, 3, 4, 5]))) .unwrap(); assert_eq!(allocation2.blocks, vec![16, 17, 12, 13, 14, 15, 18, 19]); assert_eq!(allocation2.prefix_len, 2); let allocation3 = cache .allocate(8, Some(Arc::new(vec![0, 1, 2, 3, 6, 7]))) .unwrap(); assert_eq!(allocation3.blocks, vec![16, 17, 6, 7, 8, 9, 10, 11]); assert_eq!(allocation3.prefix_len, 2); cache.free(allocation3.blocks.clone(), allocation3.allocation_id); cache.free(allocation2.blocks.clone(), allocation2.allocation_id); // 20 blocks, of which 1 reserved for health checks, 6 for allocation3, 2 for allocation2. assert_eq!(cache.free_blocks.len(), 11); let allocation4 = cache .allocate(6, Some(Arc::new(vec![0, 1, 2, 3, 4, 5]))) .unwrap(); assert_eq!(allocation4.blocks, vec![16, 17, 6, 7, 14, 15]); assert_eq!(allocation4.prefix_len, 6); assert_eq!(cache.free_blocks.len(), 11); let allocation5 = cache .allocate(6, Some(Arc::new(vec![0, 1, 2, 3, 6, 7]))) .unwrap(); assert_eq!(allocation5.blocks, vec![16, 17, 6, 7, 8, 9]); assert_eq!(allocation5.prefix_len, 6); assert_eq!(cache.free_blocks.len(), 11); } #[test] fn trie_insertions_have_correct_prefix_len() { let mut trie = RadixTrie::new(1); assert_eq!(trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(), 0); // Already exists. assert_eq!(trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(), 3); // Completely new at root-level assert_eq!(trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap(), 0); // Contains full prefix, but longer. assert_eq!(trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap(), 3); // Shares partial prefix, we need a split. 
assert_eq!( trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7]) .unwrap(), 4 ); } #[test] fn trie_insertions_block_size() { let mut trie = RadixTrie::new(2); assert_eq!(trie.insert(&[0, 1, 2, 3], &[0, 1]).unwrap(), 0); // Already exists. // But needs to be block_size aligned assert_eq!(trie.insert(&[0, 1, 2, 3], &[0, 1]).unwrap(), 4); // Completely new at root-level assert_eq!(trie.insert(&[1, 2, 3, 4], &[1, 2]).unwrap(), 0); // Contains full prefix, but longer. assert_eq!(trie.insert(&[0, 1, 2, 3, 4, 5], &[0, 1, 2]).unwrap(), 4); // Shares partial prefix, we need a split. assert_eq!( trie.insert(&[0, 1, 3, 4, 5, 6, 7, 8], &[0, 1, 2, 3]) .unwrap(), 2 ); } #[test] fn trie_get_returns_correct_blocks() { let mut trie = RadixTrie::new(1); trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(); trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap(); trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap(); trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7]) .unwrap(); let mut blocks = Vec::new(); trie.find(&[0], &mut blocks); assert_eq!(blocks, vec![0]); blocks.clear(); trie.find(&[0, 1, 2], &mut blocks); assert_eq!(blocks, vec![0, 1, 2]); blocks.clear(); trie.find(&[1, 2, 3], &mut blocks); assert_eq!(blocks, vec![1, 2, 3]); blocks.clear(); trie.find(&[0, 1, 2, 3], &mut blocks); assert_eq!(blocks, vec![0, 1, 2, 3]); blocks.clear(); trie.find(&[0, 1, 2, 3, 4], &mut blocks); assert_eq!(blocks, vec![0, 1, 2, 3, 4]); blocks.clear(); trie.find(&[0, 1, 2, 3, 5], &mut blocks); assert_eq!(blocks, vec![0, 1, 2, 3, 5]); } #[test] fn trie_evict_removes_correct_blocks() { let mut trie = RadixTrie::new(1); trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(); trie.insert(&[0, 1, 2, 3, 5, 6, 7], &[0, 1, 2, 3, 5, 6, 7]) .unwrap(); trie.insert(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]).unwrap(); trie.insert(&[1, 2, 3], &[1, 2, 3]).unwrap(); let mut blocks = Vec::new(); // Remove less than the leave blocks. 
assert_eq!(trie.evict(1), vec![7]); trie.find(&[0, 1, 2, 3, 5, 6, 7], &mut blocks); assert_eq!(blocks, vec![0, 1, 2, 3, 5, 6]); // Refresh other leaf. trie.find(&[0, 1, 2, 3, 4], &mut blocks); trie.find(&[1, 2, 3], &mut blocks); // Remove the leave blocks exactly. assert_eq!(trie.evict(2), vec![5, 6]); blocks.clear(); trie.find(&[0, 1, 2, 3, 5, 6, 7], &mut blocks); assert_eq!(blocks, vec![0, 1, 2, 3]); trie.find(&[1, 2, 3], &mut blocks); // Remove more than the leave blocks. assert_eq!(trie.evict(3), vec![4, 3, 2]); blocks.clear(); trie.find(&[0, 1, 2, 3, 4], &mut blocks); assert_eq!(blocks, vec![0, 1]); // Clear out the whole trie. assert_eq!(trie.evict(10), vec![1, 2, 3, 0, 1]); } #[test] fn full_match_returns_correct_node() { let mut trie = RadixTrie::new(1); trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(); let node_id = trie.find(&[0, 1, 2], &mut vec![]); // At this point, there are only two nodes: the root and the node // with tokens 0, 1, 2. Looking up the exact prefix must return // the non-root node. assert_ne!(node_id, trie.root); } #[test] fn partial_match_does_not_recurse() { let mut trie = RadixTrie::new(1); trie.insert(&[0, 1, 2], &[0, 1, 2]).unwrap(); trie.insert(&[0, 1, 2, 3, 4, 5], &[0, 1, 2, 3, 4, 5]) .unwrap(); let mut blocks = Vec::new(); let node_id = trie.find(&[0, 1, 3, 4, 5], &mut blocks); assert_eq!(blocks, vec![0, 1]); assert_eq!(node_id, trie.find(&[0, 1], &mut blocks)) } struct AllocationWithInfo { allocation: BlockAllocation, // We are doing a lot of set operations and `FxBuildHasher` is // muc faster for a set of integers. blockset: FxHashSet<u32>, non_prefix_blocks: FxHashSet<u32>, } #[test] fn invariants_hold_on_many_operations_remove_all() { invariants_hold_on_many_insertions(true); } #[test] fn invariants_hold_on_many_operations_remove_subset() { invariants_hold_on_many_insertions(false); } fn invariants_hold_on_many_insertions(remove_all: bool) { // Small vocabulary sizes lead to violations more quickly due to // prefix sharing, etc. 
const VOCAB_SIZE: u32 = 2; const DATA_LEN: usize = 1_000; const MAX_PREFILL_LEN: usize = 8; const MAX_DECODE_LEN: usize = 8; let vocab_range = Uniform::new(0, VOCAB_SIZE); let data_range = Uniform::new(0, DATA_LEN); let prefill_len_range = Uniform::new(0, MAX_PREFILL_LEN); let decode_len_range = Uniform::new(0, MAX_DECODE_LEN); let mut rng = SmallRng::seed_from_u64(64); let data = (0..DATA_LEN) .map(|_| vocab_range.sample(&mut rng)) .collect::<Vec<_>>(); let mut allocator = RadixAllocator::new(1, 100, None); let mut allocations = Vec::new(); for i in 0..100_000 { // Allocate until all blocks are used. 'allocation: loop { // Use offset 0 half of the times for prefix sharing. let prefill_offset = data_range.sample(&mut rng); let prefill_len = prefill_len_range.sample(&mut rng); let decode_len = decode_len_range.sample(&mut rng); let prefill = data[prefill_offset..data.len().min(prefill_offset + prefill_len)].to_vec(); let allocation = match allocator .allocate((prefill.len() + decode_len) as u32, Some(Arc::new(prefill))) { Some(allocation) => allocation, None => break 'allocation, }; let non_prefix_blocks = allocation.blocks[allocation.prefix_len as usize..] .iter() .copied() .collect::<FxHashSet<_>>(); let blockset = allocation.blocks.iter().copied().collect::<FxHashSet<_>>(); // No duplicate blocks in an allocation. assert_eq!( allocation.blocks.len(), blockset.len(), "Duplicate blocks in allocation" ); allocations.push(AllocationWithInfo { allocation, blockset, non_prefix_blocks, }); } // Check invariants. Skip first iteration, since there is no prefix sharing yet. if i > 1 { check_allocation_invariants(&allocations); } // Remove 20% of the allocations, randomly. 
if remove_all { allocations.into_iter().for_each(|allocation| { allocator.free( allocation.allocation.blocks.clone(), allocation.allocation.allocation_id, ) }); allocations = Vec::new(); } else { allocations.shuffle(&mut rng); let remove_index = (allocations.len() as f64 * 0.8) as usize; for allocation in allocations.drain(remove_index..) { allocator.free( allocation.allocation.blocks.clone(), allocation.allocation.allocation_id, ); } } } } fn check_allocation_invariants(allocations: &[AllocationWithInfo]) { for i in 0..allocations.len() { let allocation = &allocations[i]; // 0 is used for health checks, must not be used. assert!( !allocation.blockset.contains(&0), "Block 0 must not be allocated" ); // No duplicate blocks in an allocation. assert_eq!( allocation.allocation.blocks.len(), allocation.blockset.len(), "Duplicate blocks in allocation" ); for other_allocation in &allocations[i + 1..] { assert!( other_allocation .non_prefix_blocks .is_disjoint(&allocation.non_prefix_blocks), "Allocations share non-prefix blocks" ) } } } }
text-generation-inference/backends/v3/src/radix.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/radix.rs", "repo_id": "text-generation-inference", "token_count": 18123 }
290
import pytest from text_generation import Client, AsyncClient from text_generation.errors import NotFoundError, ValidationError from text_generation.types import FinishReason def test_generate(llama_7b_url, hf_headers): client = Client(llama_7b_url, hf_headers) response = client.generate("test", max_new_tokens=1, decoder_input_details=True) assert response.generated_text == "_" assert response.details.finish_reason == FinishReason.Length assert response.details.generated_tokens == 1 assert response.details.seed is None assert len(response.details.prefill) == 0 # assert response.details.prefill[0] == InputToken(id=1, text="<s>", logprob=None) assert len(response.details.tokens) == 1 assert response.details.tokens[0].id == 29918 assert response.details.tokens[0].text == "_" assert not response.details.tokens[0].special def test_generate_best_of(llama_7b_url, hf_headers): client = Client(llama_7b_url, hf_headers) response = client.generate( "test", max_new_tokens=1, best_of=2, do_sample=True, decoder_input_details=True ) assert response.details.seed is not None assert response.details.best_of_sequences is not None assert len(response.details.best_of_sequences) == 1 assert response.details.best_of_sequences[0].seed is not None def test_generate_not_found(fake_url, hf_headers): client = Client(fake_url, hf_headers) with pytest.raises(NotFoundError): client.generate("test") def test_generate_validation_error(llama_7b_url, hf_headers): client = Client(llama_7b_url, hf_headers) with pytest.raises(ValidationError): client.generate("test", max_new_tokens=10_000) def test_generate_stream(llama_7b_url, hf_headers): client = Client(llama_7b_url, hf_headers) responses = [ response for response in client.generate_stream("test", max_new_tokens=1) ] assert len(responses) == 1 response = responses[0] assert response.generated_text == "_" assert response.details.finish_reason == FinishReason.Length assert response.details.generated_tokens == 1 assert response.details.seed is None def 
test_generate_stream_not_found(fake_url, hf_headers): client = Client(fake_url, hf_headers) with pytest.raises(NotFoundError): list(client.generate_stream("test")) def test_generate_stream_validation_error(llama_7b_url, hf_headers): client = Client(llama_7b_url, hf_headers) with pytest.raises(ValidationError): list(client.generate_stream("test", max_new_tokens=10_000)) @pytest.mark.asyncio async def test_generate_async(llama_7b_url, hf_headers): client = AsyncClient(llama_7b_url, hf_headers) response = await client.generate( "test", max_new_tokens=1, decoder_input_details=True ) assert response.generated_text == "_" assert response.details.finish_reason == FinishReason.Length assert response.details.generated_tokens == 1 assert response.details.seed is None assert len(response.details.prefill) == 0 # assert response.details.prefill[0] == InputToken(id=1, text="<s>", logprob=None) # assert response.details.prefill[1] == InputToken( # id=1243, text="test", logprob=-10.96875 # ) assert len(response.details.tokens) == 1 assert response.details.tokens[0].id == 29918 assert response.details.tokens[0].text == "_" assert not response.details.tokens[0].special @pytest.mark.asyncio async def test_generate_async_best_of(llama_7b_url, hf_headers): client = AsyncClient(llama_7b_url, hf_headers) response = await client.generate( "test", max_new_tokens=1, best_of=2, do_sample=True, decoder_input_details=True ) assert response.details.seed is not None assert response.details.best_of_sequences is not None assert len(response.details.best_of_sequences) == 1 assert response.details.best_of_sequences[0].seed is not None @pytest.mark.asyncio async def test_generate_async_not_found(fake_url, hf_headers): client = AsyncClient(fake_url, hf_headers) with pytest.raises(NotFoundError): await client.generate("test") @pytest.mark.asyncio async def test_generate_async_validation_error(llama_7b_url, hf_headers): client = AsyncClient(llama_7b_url, hf_headers) with pytest.raises(ValidationError): 
await client.generate("test", max_new_tokens=10_000) @pytest.mark.asyncio async def test_generate_stream_async(llama_7b_url, hf_headers): client = AsyncClient(llama_7b_url, hf_headers) responses = [ response async for response in client.generate_stream("test", max_new_tokens=1) ] assert len(responses) == 1 response = responses[0] assert response.generated_text == "_" assert response.details.finish_reason == FinishReason.Length assert response.details.generated_tokens == 1 assert response.details.seed is None @pytest.mark.asyncio async def test_generate_stream_async_not_found(fake_url, hf_headers): client = AsyncClient(fake_url, hf_headers) with pytest.raises(NotFoundError): async for _ in client.generate_stream("test"): pass @pytest.mark.asyncio async def test_generate_stream_async_validation_error(llama_7b_url, hf_headers): client = AsyncClient(llama_7b_url, hf_headers) with pytest.raises(ValidationError): async for _ in client.generate_stream("test", max_new_tokens=10_000): pass
text-generation-inference/clients/python/tests/test_client.py/0
{ "file_path": "text-generation-inference/clients/python/tests/test_client.py", "repo_id": "text-generation-inference", "token_count": 2112 }
291
# Llamacpp Backend The llamacpp backend facilitates the deployment of large language models (LLMs) by integrating [llama.cpp][llama.cpp], an advanced inference engine optimized for both CPU and GPU computation. This backend is a component of Hugging Face’s **Text Generation Inference (TGI)** suite, specifically designed to streamline the deployment of LLMs in production environments. ## Key Capabilities - Full compatibility with GGUF format and all quantization formats (GGUF-related constraints may be mitigated dynamically by on-the-fly generation in future updates) - Optimized inference on CPU and GPU architectures - Containerized deployment, eliminating dependency complexity - Seamless interoperability with the Hugging Face ecosystem ## Model Compatibility This backend leverages models formatted in **GGUF**, providing an optimized balance between computational efficiency and model accuracy. You will find the best models on [Hugging Face][GGUF]. ## Build Docker image For optimal performance, the Docker image is compiled with native CPU instructions by default. As a result, it is strongly recommended to run the container on the same host architecture used during the build process. Efforts are ongoing to improve portability across different systems while preserving high computational efficiency. 
To build the Docker image, use the following command: ```bash docker build \ -t tgi-llamacpp \ https://github.com/huggingface/text-generation-inference.git \ -f Dockerfile_llamacpp ``` ### Build parameters | Parameter (with --build-arg) | Description | | ----------------------------------------- | -------------------------------- | | `llamacpp_version=bXXXX` | Specific version of llama.cpp | | `llamacpp_cuda=ON` | Enables CUDA acceleration | | `llamacpp_native=OFF` | Disable automatic CPU detection | | `llamacpp_cpu_arm_arch=ARCH[+FEATURE]...` | Specific ARM CPU and features | | `cuda_arch=ARCH` | Defines target CUDA architecture | For example, to target Graviton4 when building on another ARM architecture: ```bash docker build \ -t tgi-llamacpp \ --build-arg llamacpp_native=OFF \ --build-arg llamacpp_cpu_arm_arch=armv9-a+i8mm \ https://github.com/huggingface/text-generation-inference.git \ -f Dockerfile_llamacpp ``` ## Run Docker image ### CPU-based inference ```bash docker run \ -p 3000:3000 \ -e "HF_TOKEN=$HF_TOKEN" \ -v "$HOME/models:/app/models" \ tgi-llamacpp \ --model-id "Qwen/Qwen2.5-3B-Instruct" ``` ### GPU-Accelerated inference ```bash docker run \ --gpus all \ -p 3000:3000 \ -e "HF_TOKEN=$HF_TOKEN" \ -v "$HOME/models:/app/models" \ tgi-llamacpp \ --n-gpu-layers 99 --model-id "Qwen/Qwen2.5-3B-Instruct" ``` ## Using a custom GGUF GGUF files are optional as they will be automatically generated at startup if not already present in the `models` directory. However, if the default GGUF generation is not suitable for your use case, you can provide your own GGUF file with `--model-gguf`, for example: ```bash docker run \ -p 3000:3000 \ -e "HF_TOKEN=$HF_TOKEN" \ -v "$HOME/models:/app/models" \ tgi-llamacpp \ --model-id "Qwen/Qwen2.5-3B-Instruct" \ --model-gguf "models/qwen2.5-3b-instruct-q4_0.gguf" ``` Note that `--model-id` is still required. 
## Advanced parameters A full listing of configurable parameters is available in the `--help`: ```bash docker run tgi-llamacpp --help ``` The table below summarizes key options: | Parameter | Description | |-------------------------------------|------------------------------------------------------------------------| | `--n-threads` | Number of threads to use for generation | | `--n-threads-batch` | Number of threads to use for batch processing | | `--n-gpu-layers` | Number of layers to store in VRAM | | `--split-mode` | Split the model across multiple GPUs | | `--defrag-threshold` | Defragment the KV cache if holes/size > threshold | | `--numa` | Enable NUMA optimizations | | `--disable-mmap` | Disable memory mapping for the model | | `--use-mlock` | Use memory locking to prevent swapping | | `--disable-offload-kqv` | Disable offloading of KQV operations to the GPU | | `--disable-flash-attention` | Disable flash attention | | `--type-k` | Data type used for K cache | | `--type-v` | Data type used for V cache | | `--validation-workers` | Number of tokenizer workers used for payload validation and truncation | | `--max-concurrent-requests` | Maximum number of concurrent requests | | `--max-input-tokens` | Maximum number of input tokens per request | | `--max-total-tokens` | Maximum number of total tokens (input + output) per request | | `--max-batch-total-tokens` | Maximum number of tokens in a batch | | `--max-physical-batch-total-tokens` | Maximum number of tokens in a physical batch | | `--max-batch-size` | Maximum number of requests per batch | --- [llama.cpp]: https://github.com/ggerganov/llama.cpp [GGUF]: https://huggingface.co/models?library=gguf&sort=trending
text-generation-inference/docs/source/backends/llamacpp.md/0
{ "file_path": "text-generation-inference/docs/source/backends/llamacpp.md", "repo_id": "text-generation-inference", "token_count": 2663 }
292
# Guidance ## What is Guidance? Guidance is a feature that allows users to constrain the generation of a large language model with a specified grammar. This feature is particularly useful when you want to generate text that follows a specific structure or uses a specific set of words or produce output in a specific format. A prominent example is JSON grammar, where the model is forced to output valid JSON. ## How is it used? Guidance can be implemented in many ways and the community is always finding new ways to use it. Here are some examples of how you can use guidance: Technically, guidance can be used to generate: - a specific JSON object - a function signature - typed output like a list of integers However these use cases can span a wide range of applications, such as: - extracting structured data from unstructured text - summarizing text into a specific format - limit output to specific classes of words (act as a LLM powered classifier) - generate the input to specific APIs or services - provide reliable and consistent output for downstream tasks - extract data from multimodal inputs ## How it works? Diving into the details, guidance is enabled by including a grammar with a generation request that is compiled, and used to modify the chosen tokens. This process can be broken down into the following steps: 1. A request is sent to the backend, it is processed and placed in batch. Processing includes compiling the grammar into a finite state machine and a grammar state. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/request-to-batch.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/request-to-batch-dark.gif" /> </div> 2. The model does a forward pass over the batch. This returns probabilities for each token in the vocabulary for each request in the batch. 3. 
The process of choosing one of those tokens is called `sampling`. The model samples from the distribution of probabilities to choose the next token. In TGI all of the steps before sampling are called `processor`. Grammars are applied as a processor that masks out tokens that are not allowed by the grammar. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/logit-grammar-mask.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/logit-grammar-mask-dark.gif" /> </div> 4. The grammar mask is applied and the model samples from the remaining tokens. Once a token is chosen, we update the grammar state with the new token, to prepare it for the next pass. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/sample-logits.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/sample-logits-dark.gif" /> </div> ## How to use Guidance? There are two main ways to use guidance; you can either use the `/generate` endpoint with a grammar or use the `/chat/completion` endpoint with tools. Under the hood tools are a special case of grammars that allows the model to choose one or none of the provided tools. Please refer to [using guidance](../basic_tutorials/using_guidance) for more examples and details on how to use guidance in Python, JavaScript, and cURL. ### Getting the most out of guidance Depending on how you are using guidance, you may want to make use of different features. Here are some tips to get the most out of guidance: - If you are using the `/generate` with a `grammar` it is recommended to include the grammar in the prompt prefixed by something like `Please use the following JSON schema to generate the output:`. 
This will help the model understand the context of the grammar and generate the output accordingly. - If you are getting a response with many repeated tokens, please use the `frequency_penalty` or `repetition_penalty` to reduce the number of repeated tokens in the output.
text-generation-inference/docs/source/conceptual/guidance.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/guidance.md", "repo_id": "text-generation-inference", "token_count": 1237 }
293
# Multi-backend support TGI (Text Generation Inference) offers flexibility by supporting multiple backends for serving large language models (LLMs). With multi-backend support, you can choose the backend that best suits your needs, whether you prioritize performance, ease of use, or compatibility with specific hardware. API interaction with TGI remains consistent across backends, allowing you to switch between them seamlessly. **Supported backends:** * **TGI CUDA backend**: This high-performance backend is optimized for NVIDIA GPUs and serves as the default option within TGI. Developed in-house, it boasts numerous optimizations and is used in production by various projects, including those by Hugging Face. * **[TGI TRTLLM backend](./backends/trtllm)**: This backend leverages NVIDIA's TensorRT library to accelerate LLM inference. It utilizes specialized optimizations and custom kernels for enhanced performance. However, it requires a model-specific compilation step for each GPU architecture. * **[TGI Llamacpp backend](./backends/llamacpp)**: This backend facilitates the deployment of large language models (LLMs) by integrating [llama.cpp][llama.cpp], an advanced inference engine optimized for both CPU and GPU computation. * **[TGI Neuron backend](./backends/neuron)**: This backend leverages the [AWS Neuron SDK](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/) to allow the deployment of large language models (LLMs) on [AWS Trainium and Inferentia chips](https://aws.amazon.com/ai/machine-learning/trainium/).
text-generation-inference/docs/source/multi_backend_support.md/0
{ "file_path": "text-generation-inference/docs/source/multi_backend_support.md", "repo_id": "text-generation-inference", "token_count": 381 }
294
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 323, "logprob": -1.1171875, "special": false, "text": " and" }, { "id": 1268, "logprob": -0.9477539, "special": false, "text": " how" }, { "id": 1587, "logprob": -0.51464844, "special": false, "text": " does" }, { "id": 433, "logprob": -0.043182373, "special": false, "text": " it" }, { "id": 1782, "logprob": -1.0810547, "special": false, "text": " differ" }, { "id": 505, "logprob": -0.005054474, "special": false, "text": " from" }, { "id": 8776, "logprob": -0.47485352, "special": false, "text": " traditional" }, { "id": 5780, "logprob": -0.15112305, "special": false, "text": " machine" }, { "id": 6975, "logprob": -0.0011291504, "special": false, "text": " learning" }, { "id": 5380, "logprob": -0.31323242, "special": false, "text": "?\n" } ], "top_tokens": null }, "generated_text": " and how does it differ from traditional machine learning?\n" }
text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int/test_compressed_tensors_w8a8_int.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_compressed_tensors_w8a8_int/test_compressed_tensors_w8a8_int.json", "repo_id": "text-generation-inference", "token_count": 873 }
295
{ "choices": [ { "finish_reason": "length", "index": 0, "logprobs": null, "message": { "content": " the royal mouse? It is a little more slender and only weighs around 1.5 pounds for males and 1.3 pounds", "role": "assistant" } } ], "created": 1732541190, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "chat.completion", "system_fingerprint": "2.4.1-dev0-native", "usage": { "completion_tokens": 30, "prompt_tokens": 73, "total_tokens": 103 } }
text-generation-inference/integration-tests/models/__snapshots__/test_continue_final_message/test_llama_completion_single_prompt_continue.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_continue_final_message/test_llama_completion_single_prompt_continue.json", "repo_id": "text-generation-inference", "token_count": 260 }
296
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 29946, "logprob": -1.4765625, "special": false, "text": "4" }, { "id": 29906, "logprob": -0.9199219, "special": false, "text": "2" }, { "id": 29889, "logprob": 0.0, "special": false, "text": "." }, { "id": 29896, "logprob": -1.1367188, "special": false, "text": "1" }, { "id": 29889, "logprob": -1.4648438, "special": false, "text": "." }, { "id": 29896, "logprob": -0.40722656, "special": false, "text": "1" }, { "id": 29889, "logprob": -0.17419434, "special": false, "text": "." }, { "id": 29896, "logprob": -0.20251465, "special": false, "text": "1" }, { "id": 29900, "logprob": -1.5527344, "special": false, "text": "0" }, { "id": 29896, "logprob": -1.3710938, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": "42.1.1.101" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json", "repo_id": "text-generation-inference", "token_count": 860 }
297
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 1313, "logprob": -2.3613281, "special": false, "text": "It" }, { "id": 3969, "logprob": -0.7285156, "special": false, "text": " seems" }, { "id": 298, "logprob": -1.3466797, "special": false, "text": " to" }, { "id": 528, "logprob": 0.0, "special": false, "text": " me" }, { "id": 28725, "logprob": -1.6757812, "special": false, "text": "," }, { "id": 369, "logprob": 0.0, "special": false, "text": " that" }, { "id": 513, "logprob": -1.1269531, "special": false, "text": " if" }, { "id": 368, "logprob": 0.0, "special": false, "text": " you" }, { "id": 28742, "logprob": -2.4921875, "special": false, "text": "'" }, { "id": 267, "logprob": 0.0, "special": false, "text": "re" } ], "top_tokens": null }, "generated_text": "What is gradient descent?\n\nIt seems to me, that if you're" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_mixtral/test_flash_mixtral_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mixtral/test_flash_mixtral_all_params.json", "repo_id": "text-generation-inference", "token_count": 858 }
298
{ "details": { "best_of_sequences": null, "finish_reason": "stop_sequence", "generated_tokens": 6, "prefill": [], "seed": 0, "tokens": [ { "id": 284, "logprob": -0.28955078, "special": false, "text": " to" }, { "id": 3758, "logprob": -0.7739258, "special": false, "text": " send" }, { "id": 1366, "logprob": -0.85253906, "special": false, "text": " data" }, { "id": 625, "logprob": -0.8984375, "special": false, "text": " over" }, { "id": 257, "logprob": -1.0830078, "special": false, "text": " a" }, { "id": 3127, "logprob": -1.9404297, "special": false, "text": " network" } ], "top_tokens": null }, "generated_text": "Test request to send data over a network" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi_all_params.json", "repo_id": "text-generation-inference", "token_count": 568 }
299
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21447754, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.43701172, "special": false, "text": " print" }, { "id": 372, "logprob": -0.5361328, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2412109, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.7583008, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.20837402, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2470703, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json", "repo_id": "text-generation-inference", "token_count": 866 }
300
{ "choices": [ { "finish_reason": "stop", "index": 0, "logprobs": null, "message": { "content": "{ \"unit\": \"fahrenheit\", \"temperature\": [ 72, 79, 88 ] }", "role": "assistant" } } ], "created": 1740095072, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "chat.completion", "system_fingerprint": "3.1.1-dev0-native", "usage": { "completion_tokens": 29, "prompt_tokens": 135, "total_tokens": 164 } }
text-generation-inference/integration-tests/models/__snapshots__/test_grammar_response_format_llama/test_grammar_response_format_llama_json.1.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_grammar_response_format_llama/test_grammar_response_format_llama_json.1.json", "repo_id": "text-generation-inference", "token_count": 255 }
301
[ { "choices": [ { "delta": { "content": "Once", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " upon", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " a", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " time", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": ",", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " in", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " a", "role": "assistant", "tool_calls": 
null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " vibrant", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " ocean", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " filled", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " with", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " coral", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " reefs", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], 
"created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " and", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " schools", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " of", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " shimmer", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": "ing", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": " fish", "role": "assistant", "tool_calls": null }, "finish_reason": null, "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": 
"meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null }, { "choices": [ { "delta": { "content": ",", "role": "assistant", "tool_calls": null }, "finish_reason": "length", "index": 0, "logprobs": null } ], "created": 1741695408, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion.chunk", "system_fingerprint": "3.1.2-dev0-native", "usage": null } ]
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_sea_creatures_stream_auto.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_sea_creatures_stream_auto.json", "repo_id": "text-generation-inference", "token_count": 4845 }
302
import pytest @pytest.fixture(scope="module") def chat_handle(launcher): with launcher( "meta-llama/Meta-Llama-3.1-8B-Instruct", ) as handle: yield handle @pytest.fixture(scope="module") async def chat_client(chat_handle): await chat_handle.health(300) return chat_handle.client
text-generation-inference/integration-tests/models/test_chat_stream_options.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_chat_stream_options.py", "repo_id": "text-generation-inference", "token_count": 128 }
303
import pytest @pytest.fixture(scope="module") def flash_gpt2_handle(launcher): with launcher("openai-community/gpt2", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def flash_gpt2(flash_gpt2_handle): await flash_gpt2_handle.health(300) return flash_gpt2_handle.client @pytest.mark.release @pytest.mark.asyncio async def test_flash_gpt2(flash_gpt2, response_snapshot): response = await flash_gpt2.generate( "What is deep learning?", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio async def test_flash_gpt2_load(flash_gpt2, generate_load, response_snapshot): responses = await generate_load( flash_gpt2, "What is deep learning?", max_new_tokens=10, n=4, ) generated_texts = [r.generated_text for r in responses] assert len(generated_texts) == 4 assert all( [text == generated_texts[0] for text in generated_texts] ), generated_texts assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_gpt2.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_gpt2.py", "repo_id": "text-generation-inference", "token_count": 476 }
304
import pytest @pytest.fixture(scope="module") def flash_neox_handle(launcher): with launcher("stabilityai/stablelm-tuned-alpha-3b", num_shard=1) as handle: yield handle @pytest.fixture(scope="module") async def flash_neox(flash_neox_handle): await flash_neox_handle.health(300) return flash_neox_handle.client @pytest.mark.release @pytest.mark.skip @pytest.mark.asyncio async def test_flash_neox(flash_neox, response_snapshot): response = await flash_neox.generate( "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.skip @pytest.mark.asyncio async def test_flash_neox_load(flash_neox, generate_load, response_snapshot): responses = await generate_load( flash_neox, "<|USER|>What's your mood today?<|ASSISTANT|>", max_new_tokens=10, n=4, ) generated_texts = [r.generated_text for r in responses] assert len(generated_texts) == 4 assert all( [text == generated_texts[0] for text in generated_texts] ), generated_texts assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_neox.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_neox.py", "repo_id": "text-generation-inference", "token_count": 514 }
305
import pytest @pytest.fixture(scope="module") def idefics_handle(launcher): with launcher( "HuggingFaceM4/idefics-9b-instruct", num_shard=2, dtype="float16" ) as handle: yield handle @pytest.fixture(scope="module") async def idefics(idefics_handle): await idefics_handle.health(300) return idefics_handle.client @pytest.mark.asyncio async def test_idefics(idefics, response_snapshot, chicken): response = await idefics.generate( f"User:![]({chicken})Can you tell me a very short story based on the image?", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert ( response.generated_text == " \nAssistant: A rooster stands" ), f"{repr(response.generated_text)}" assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio @pytest.mark.private async def test_idefics_two_images(idefics, response_snapshot, chicken, cow_beach): response = await idefics.generate( f"User:![]({chicken})![]({cow_beach})Where are the cow and chicken?<end_of_utterance> \nAssistant:", max_new_tokens=20, ) assert ( response.generated_text == " The cow and chicken are standing on a beach." ), f"{repr(response.generated_text)}" assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio async def test_idefics_load(idefics, generate_load, response_snapshot, chicken): responses = await generate_load( idefics, f"User:![]({chicken})Can you tell me a very short story based on the image?", max_new_tokens=10, n=4, ) generated_texts = [r.generated_text for r in responses] assert generated_texts[0] == " \nAssistant: A rooster stands" assert len(generated_texts) == 4 assert generated_texts, all( [text == generated_texts[0] for text in generated_texts] ) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_idefics.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_idefics.py", "repo_id": "text-generation-inference", "token_count": 783 }
306
# import base64 # from io import BytesIO # from PIL import Image # # import pytest # # # @pytest.fixture(scope="module") # def flash_llama4_handle(launcher): # with launcher("ll-re/Llama-4-Scout-17B-16E-Instruct", num_shard=8) as handle: # yield handle # # # @pytest.fixture(scope="module") # async def flash_llama4(flash_llama4_handle): # await flash_llama4_handle.health(300) # return flash_llama4_handle.client # # # async def test_flash_llama4(flash_llama4, response_snapshot): # response = await flash_llama4.generate( # "Hello I am doing a project on the 1918 flu pandemic and I am trying to find out how many", # seed=42, # max_new_tokens=100, # ) # # assert ( # response.generated_text # == " people died in the 1918 flu pandemic. Estimating the death toll of the 1918 flu pandemic is difficult because of incomplete records and because of the fact that many of the extra deaths were not attributed to the flu. Many experts believe that the 1918 flu pandemic killed between 50 and 100 million people. Iassistant\n\nThe 1918 flu pandemic, also known as the Spanish flu, is indeed one of the most devastating public health crises in human history. Estimating the exact" # ) # assert response.details.generated_tokens == 100 # assert response == response_snapshot # # # async def test_flash_llama4_image_cow_dog(flash_llama4, response_snapshot): # image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png" # response = await flash_llama4.chat( # seed=42, # messages=[ # { # "role": "user", # "content": [ # {"type": "image_url", "image_url": {"url": image_url}}, # { # "type": "text", # "text": "What is the breed of the dog in the image?", # }, # ], # }, # ], # max_tokens=100, # ) # # assert ( # response.choices[0].message.content # == "The image does not depict a dog; it shows a cow standing on a beach. Therefore, there is no breed of a dog to identify." 
# ) # assert response.usage["completion_tokens"] == 30 # assert response == response_snapshot # # # async def test_flash_llama4_image_cow(flash_llama4, response_snapshot): # image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png" # response = await flash_llama4.chat( # seed=42, # messages=[ # { # "role": "user", # "content": [ # {"type": "image_url", "image_url": {"url": image_url}}, # {"type": "text", "text": "What is shown in this image?"}, # ], # }, # ], # max_tokens=100, # ) # assert ( # response.choices[0].message.content # == "The image shows a brown cow standing on the beach with a white face and black and white marking on its ears. The cow has a white patch around its nose and mouth. The ocean and blue sky are in the background." # ) # assert response.usage["completion_tokens"] == 46 # assert response == response_snapshot # # # # Helper function to convert a Pillow image to a base64 data URL # def image_to_data_url(img: Image.Image, fmt: str) -> str: # buffer = BytesIO() # img.save(buffer, format=fmt) # img_data = buffer.getvalue() # b64_str = base64.b64encode(img_data).decode("utf-8") # mime_type = "image/png" if fmt.upper() == "PNG" else "image/jpeg" # return f"data:{mime_type};base64,{b64_str}" # # # async def test_flash_llama4_image_base64_rgba(flash_llama4, response_snapshot): # # Create an empty 100x100 PNG image with alpha (transparent background) # img = Image.new("RGBA", (100, 100), (0, 0, 0, 0)) # data_url = image_to_data_url(img, "PNG") # response = await flash_llama4.chat( # seed=42, # messages=[ # { # "role": "user", # "content": [ # {"type": "image_url", "image_url": {"url": data_url}}, # { # "type": "text", # "text": "What do you see in this transparent image?", # }, # ], # }, # ], # max_tokens=100, # ) # assert response == response_snapshot # # # async def test_flash_llama4_image_base64_rgb_png(flash_llama4, response_snapshot): # # Create an empty 100x100 PNG image without alpha 
(white background) # img = Image.new("RGB", (100, 100), (255, 255, 255)) # data_url = image_to_data_url(img, "PNG") # response = await flash_llama4.chat( # seed=42, # messages=[ # { # "role": "user", # "content": [ # {"type": "image_url", "image_url": {"url": data_url}}, # {"type": "text", "text": "What do you see in this plain image?"}, # ], # }, # ], # max_tokens=100, # ) # assert response == response_snapshot # # # async def test_flash_llama4_image_base64_rgb_jpg(flash_llama4, response_snapshot): # # Create an empty 100x100 JPEG image (white background) # img = Image.new("RGB", (100, 100), (255, 255, 255)) # data_url = image_to_data_url(img, "JPEG") # response = await flash_llama4.chat( # seed=42, # messages=[ # { # "role": "user", # "content": [ # {"type": "image_url", "image_url": {"url": data_url}}, # {"type": "text", "text": "What do you see in this JPEG image?"}, # ], # }, # ], # max_tokens=100, # ) # assert response == response_snapshot
text-generation-inference/integration-tests/models/test_transformers_llama4.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_transformers_llama4.py", "repo_id": "text-generation-inference", "token_count": 2864 }
307
import json def main(): with open("./ShareGPT_V3_unfiltered_cleaned_split.json", "r") as f: data = json.load(f) # Select only the first 2k conversations that start with a human. max = 2000 conversations = [] for conversation in data: conv = conversation.get("conversations") if conv and conv[0]["from"] == "human": # Trim the rest of the output conversation["conversations"] = conversation["conversations"][:1] conversations.append(conversation) if len(conversation) >= max: break with open("./small.json", "w") as f: data = json.dump(conversations, f, indent=4) if __name__ == "__main__": main()
text-generation-inference/load_tests/filter.py/0
{ "file_path": "text-generation-inference/load_tests/filter.py", "repo_id": "text-generation-inference", "token_count": 307 }
308
# Router Also named `webserver` throughout the docs. This router is handling most of the logic to handle the "batches" tell when to pass new `prefill` requests and pausing `decode` requests, which ones etc... It uses gRPC to communicate with the shards which can therefore be kept much simpler and focus on having the most efficient forward passes as possible. ## Continuous batching One important feature of `text-generation-inference` is enabled by this `router`. Continuous batching is the act of regularly running queries in the same `forward` step of the LLM (a "batch") and also removing them when they are finished. In order for continuous batching to be useful, you need to have more compute available with respect to the memory requirements of your model. This is essentially true for LLMs and the larger the model, the truer it gets (since you have to pool multiple GPUs to load the model, you effectively have a lot of compute power at your hands). Static batching is the act of doing several queries at the same time, but usually this is controlled by the client, and therefore the amount of batching is decided beforehand. For text-generation, and LLMs which are memory bound we can try to be much more efficient with the available compute, by having client sending us single queries, and let the router mix&match queries into or out of batches to make the use the compute the most efficiently. This is possible because for LLMs the total compute for running the model is much bigger than doing mix&match of the batches themselves. ### Simple continuous batching text-generation works by feeding a prompt to a model, and iteratively calling `forward` on the model to produce new text, 1 token at a time. The first idea is simple, when a query arrives, we start working on it directly. When new queries arrive, we simply wait for the current `forward` to be finished then batch the current running prompt with the new query, and call `forward`. 
Whenever either query is finished: either the model produce EOS (end of sentence) token or the query reached the allowed limit. We simply drop it from the batch, remove all the allocated memory and we can continue with the rest until nothing is left. This simple idea generalizes very well and we could potentially stack many requests in the same batch. One thing to note, is that queries can be potentially run with different parameters meaning different way to choose the next token (sampling, not sampling, temperature, top_k etc..). This is not problematic for the proposed approach we just need to do the sampling independantly on each member of the batch. ### Prefill, decode and past key values In order to make LLMs and text-generation efficient, there's actually a very powerful trick that can be used, which is the "caching" of some attention matrices. [More on that in the first part of this blog](https://huggingface.co/blog/accelerated-inference#getting-to-the-first-10x-speedup) What this means, is that the first "pass" of a prompt is different from the subsequent "forward" passes. Since for the first one we have to compute the entire attention matrix, whereas in the follow-ups only require to compute the new token attention. The first pass is called `prefill` throughout this codebase where as the follow-ups are called `decode`. Since `prefill` is much more expensive than `decode` we don't want to do it all the time, but a currently running query is probably doing `decode`. If we want to do the continuous batching as explained previously we need to run `prefill` at some point in order to create the attention matrix required to be able to join the `decode` group. `text-generation-inference` uses a bunch of different strategies and parameters in order to enable you to find the sweet spot between exploiting the hardware and perceived latency. 
With no continuous batching at all, latency is going to be super good, but throughput (meaning the total number of requests allowed in a given timeframe) is going to be super bad (since it's essentially 1). With static batching, you can probably reach the maximum throughput (by using the maximum total batch size applicable to your hardware), but the latency is super bad since in order to have maximum throughput you need to wait for requests to come in before processing. With continuous batching you can find a sweet spot. In general latency is the most critical parameter users care about. But a 2x latency slowdown for 10x more users on the same hardware is an acceptable tradeoff. ## Token streaming This is a very important aspect of client UX. As mentionned above, latency is the most critical perceived quality of an LLM API. With token streaming, the server can start answering after the first `prefill` pass directly, without waiting for all the generation to be done. For extremely long queries this means clients can start to see something happening orders of magnitude before the work is done. Seeing something in progress allows them to cut short if it's not what's wanted but also it "feels" better.
text-generation-inference/router/README.md/0
{ "file_path": "text-generation-inference/router/README.md", "repo_id": "text-generation-inference", "token_count": 1175 }
309
#!/bin/bash if [[ -z "${HF_MODEL_ID}" ]]; then echo "HF_MODEL_ID must be set" exit 1 fi export MODEL_ID="${HF_MODEL_ID}" if [[ -n "${HF_MODEL_REVISION}" ]]; then export REVISION="${HF_MODEL_REVISION}" fi if [[ -n "${SM_NUM_GPUS}" ]]; then export NUM_SHARD="${SM_NUM_GPUS}" fi if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then export QUANTIZE="${HF_MODEL_QUANTIZE}" fi if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}" fi text-generation-launcher --port 8080
text-generation-inference/sagemaker-entrypoint.sh/0
{ "file_path": "text-generation-inference/sagemaker-entrypoint.sh", "repo_id": "text-generation-inference", "token_count": 239 }
310
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _cuda_compat_cuh #define _cuda_compat_cuh // atomicAdd for half types, to support CC < 7.x __device__ __forceinline__ void atomicAdd_half(half* address, half val) { unsigned int * address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2)); unsigned int old = *address_as_ui; unsigned int assumed; do { assumed = old; __half_raw hsum; hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff); half tmpres = __hadd(hsum, val); hsum = __half_raw(tmpres); old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x; old = atomicCAS(address_as_ui, assumed, old); } while (assumed != old); } // atomicAdd for half2 types __device__ __forceinline__ void atomicAdd_half2(half2* address, half2 val) { unsigned int* address_as_ui = (unsigned int*)address; unsigned int old = *address_as_ui; unsigned int assumed; do { assumed = old; half2 old_val = *((half2*)&old); half2 new_val = __hadd2(old_val, val); old = atomicCAS(address_as_ui, assumed, *((unsigned int*)&new_val)); } while (assumed != old); } // #if defined(__CUDA_ARCH__) || defined(USE_ROCM) #if __CUDA_ARCH__ < 700 || defined(USE_ROCM) __device__ __forceinline__ void atomicAdd(half* address, half val) { atomicAdd_half(address, val); } #if __CUDA_ARCH__ < 600 || defined(USE_ROCM) __device__ __forceinline__ void atomicAdd(half2* address, half2 val) { atomicAdd_half2(address, val); } #endif #endif #endif #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh", "repo_id": "text-generation-inference", "token_count": 692 }
311
#ifndef _util_h
#define _util_h

// printf-based debug helpers. Each macro prints the argument expression's
// source text (via the # stringizing operator) next to its runtime value:
//   DBGS  - raw string              DBGI* - one to three ints
//   DBGF* - one to three floats/doubles
#define DBGS(__x) printf("%s\n", __x)
#define DBGI(__x) printf("%s: %i\n", #__x, __x)
#define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y)
#define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGF(__x) printf("%s: %f\n", #__x, __x)
#define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y)
#define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z)

#endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h", "repo_id": "text-generation-inference", "token_count": 296 }
312
#ifndef _util_cuh
#define _util_cuh

#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include <ATen/cuda/CUDAContext.h>

// Ceiling division: number of `size`-sized groups needed to cover x.
#define DIVIDE(x, size) (((x) + (size) - 1) / (size))

// printf-based debug helpers; each prints the argument expression's source
// text (# stringizing) next to its value. S=string, I=int, X=hex,
// F=float, H=half (converted to float for printing), IH=int+half mix.
#define DBGS(__x) printf("%s\n", __x)
#define DBGI(__x) printf("%s: %i\n", #__x, __x)
#define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y)
#define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGX(__x) printf("%s: %x\n", #__x, __x)
#define DBGX2(__x, __y) printf("%s, %s: %x, %x\n", #__x, #__y, __x, __y)
#define DBGX3(__x, __y, __z) printf("%s, %s, %s: %x, %x, %x\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGF(__x) printf("%s: %f\n", #__x, __x)
#define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y)
#define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGH(__x) printf("%s: %f\n", #__x, __half2float(__x))
#define DBGH2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __half2float(__x), __half2float(__y))
#define DBGH3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __half2float(__x), __half2float(__y), __half2float(__z))
#define DBGIH(__x, __y) printf("%s, %s: %i, %f\n", #__x, #__y, __x, __half2float(__y))
#define DBGIH2(__x, __y, __z) printf("%s, %s, %s: %i, %f, %f\n", #__x, #__y, #__z, __x, __half2float(__y), __half2float(__z))

// Dequantize a scale code: computes ((qs + 1) / 16)^2 * max_scale in half
// precision (squaring gives a non-linear mapping of the 4-bit code).
__forceinline__ __device__ half dq_scale_(const int qs, const half max_scale)
{
    half qs_h = __hmul(__int2half_rn(qs + 1), __float2half_rn(1.0f / 16.0f));
    qs_h = __hmul(qs_h, qs_h);
    qs_h = __hmul(qs_h, max_scale);
    return qs_h;
}

// Clamp x into [a, b].
__forceinline__ __device__ float clamp(float x, float a, float b)
{
    return fmaxf(a, fminf(b, x));
}

// Wrap a CUDA API call; on failure prints the error location and (by
// default) aborts the process with the error code.
#define cuda_check(ans) { gpu_assert((ans), __FILE__, __LINE__); }
inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"CUDA error: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Debug dump of a half matrix from global memory; defined elsewhere.
void print_global_mem(const half* ptr, int rows, int columns, int stride);

#endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh", "repo_id": "text-generation-inference", "token_count": 1115 }
313
"""Unit tests for ``text_generation_server.utils.adapter``: LoRA adapter
string parsing and the attention/MLP weight-name mapping helpers."""

import pytest
from unittest.mock import Mock
from text_generation_server.utils.adapter import (
    get_attn_weights,
    get_mlp_weights,
    parse_lora_adapters,
    AdapterInfo,
)


def test_parse_lora_adapters_empty():
    # Both "unset" spellings must yield an empty adapter list.
    assert parse_lora_adapters(None) == []
    assert parse_lora_adapters("") == []


def test_parse_lora_adapters_single():
    result = parse_lora_adapters("adapter1")
    assert result == [AdapterInfo(id="adapter1", path=None, revision=None)]


def test_parse_lora_adapters_with_path():
    result = parse_lora_adapters("adapter1=path/to/adapter1")
    assert result == [
        AdapterInfo(id="adapter1", path="path/to/adapter1", revision=None)
    ]


def test_parse_lora_adapters_with_path_and_revision():
    result = parse_lora_adapters("adapter1=path/to/adapter1@main")
    assert result == [
        AdapterInfo(id="adapter1", path="path/to/adapter1", revision="main")
    ]


def test_parse_lora_adapters_multiple():
    result = parse_lora_adapters(
        "adapter1,adapter2=path/to/adapter2,adapter3=path/to/adapter3@dev"
    )
    assert result == [
        AdapterInfo(id="adapter1", path=None, revision=None),
        AdapterInfo(id="adapter2", path="path/to/adapter2", revision=None),
        AdapterInfo(id="adapter3", path="path/to/adapter3", revision="dev"),
    ]


def test_parse_lora_adapters_invalid_format():
    # FIX: use pytest.raises instead of try/except with a manual
    # `assert False` — it is the idiomatic form and reports a clearer
    # failure when no exception is raised.
    with pytest.raises(ValueError) as exc_info:
        parse_lora_adapters("adapter1,invalid=format=test,adapter3")
    assert str(exc_info.value) == "Invalid LoRA adapter format: invalid=format=test"


def test_get_attn_weights():
    # create a mock layer
    mock_layer = Mock()
    mock_layer.self_attn.query_key_value = Mock()
    mock_layer.self_attn.o_proj = Mock()

    # call the function
    result = get_attn_weights(2, mock_layer)

    # q/k/v/qkv all map onto the fused query_key_value module.
    expected = {
        (2, "q_proj"): (
            "model.layers.2.self_attn.q_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "k_proj"): (
            "model.layers.2.self_attn.k_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "qkv_proj"): (
            "model.layers.2.self_attn.qkv_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "v_proj"): (
            "model.layers.2.self_attn.v_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "o_proj"): ("model.layers.2.self_attn.o_proj", mock_layer.self_attn.o_proj),
    }
    assert result == expected


def test_get_mlp_weights_with_gate_up_proj():
    # create a mock layer with gate_up_proj
    mock_layer = Mock()
    mock_layer.mlp.gate_up_proj = Mock()
    mock_layer.mlp.down_proj = Mock()

    # call the function
    result = get_mlp_weights(3, mock_layer)

    # gate/up both map onto the fused gate_up_proj module.
    expected = {
        (3, "c_fc"): ("model.layers.3.mlp.c_fc", mock_layer.mlp.c_fc),
        (3, "c_proj"): ("model.layers.3.mlp.c_proj", mock_layer.mlp.c_proj),
        (3, "gate_proj"): ("model.layers.3.mlp.gate_proj", mock_layer.mlp.gate_up_proj),
        (3, "up_proj"): ("model.layers.3.mlp.up_proj", mock_layer.mlp.gate_up_proj),
        (3, "down_proj"): ("model.layers.3.mlp.down_proj", mock_layer.mlp.down_proj),
    }
    assert result == expected


def test_get_mlp_weights_without_gate_up_proj():
    # create a mock layer without gate_up_proj (spec=[] exposes no attributes)
    mock_layer = Mock()
    mock_layer.mlp = Mock(spec=[])

    # call the function
    result = get_mlp_weights(1, mock_layer)

    # assert the result
    assert result == {}


@pytest.mark.parametrize("layer_index", [0, 1, 5])
def test_get_attn_weights_different_layers(layer_index):
    mock_layer = Mock()
    mock_layer.self_attn.query_key_value = Mock()
    mock_layer.self_attn.o_proj = Mock()

    result = get_attn_weights(layer_index, mock_layer)

    # The layer index must be threaded into every generated weight name.
    for k in ["q", "k", "v"]:
        assert (layer_index, f"{k}_proj") in result
        assert (
            result[(layer_index, f"{k}_proj")][0]
            == f"model.layers.{layer_index}.self_attn.{k}_proj"
        )

    assert (layer_index, "o_proj") in result
    assert (
        result[(layer_index, "o_proj")][0]
        == f"model.layers.{layer_index}.self_attn.o_proj"
    )


@pytest.mark.parametrize("layer_index", [0, 1, 5])
def test_get_mlp_weights_different_layers(layer_index):
    mock_layer = Mock()
    mock_layer.mlp.gate_up_proj = Mock()
    mock_layer.mlp.down_proj = Mock()

    result = get_mlp_weights(layer_index, mock_layer)

    for k in ["gate", "up", "down"]:
        assert (layer_index, f"{k}_proj") in result
        assert (
            result[(layer_index, f"{k}_proj")][0]
            == f"model.layers.{layer_index}.mlp.{k}_proj"
        )


def test_get_attn_weights_llama_compatibility():
    mock_layer = Mock()
    mock_layer.self_attn.query_key_value = Mock()
    mock_layer.self_attn.o_proj = Mock()

    result = get_attn_weights(2, mock_layer)

    expected = {
        (2, "q_proj"): (
            "model.layers.2.self_attn.q_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "k_proj"): (
            "model.layers.2.self_attn.k_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "qkv_proj"): (
            "model.layers.2.self_attn.qkv_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "v_proj"): (
            "model.layers.2.self_attn.v_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "o_proj"): ("model.layers.2.self_attn.o_proj", mock_layer.self_attn.o_proj),
    }
    assert result == expected


def test_get_mlp_weights_llama_compatibility():
    mock_layer = Mock()
    mock_layer.mlp.gate_up_proj = Mock()
    mock_layer.mlp.down_proj = Mock()

    result = get_mlp_weights(3, mock_layer)

    expected = {
        (3, "c_fc"): ("model.layers.3.mlp.c_fc", mock_layer.mlp.c_fc),
        (3, "c_proj"): ("model.layers.3.mlp.c_proj", mock_layer.mlp.c_proj),
        (3, "gate_proj"): ("model.layers.3.mlp.gate_proj", mock_layer.mlp.gate_up_proj),
        (3, "up_proj"): ("model.layers.3.mlp.up_proj", mock_layer.mlp.gate_up_proj),
        (3, "down_proj"): ("model.layers.3.mlp.down_proj", mock_layer.mlp.down_proj),
    }
    assert result == expected


def test_get_attn_weights_gemma_compatibility():
    mock_layer = Mock()
    mock_layer.self_attn.query_key_value = Mock()
    mock_layer.self_attn.o_proj = Mock()

    result = get_attn_weights(2, mock_layer)

    expected = {
        (2, "q_proj"): (
            "model.layers.2.self_attn.q_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "k_proj"): (
            "model.layers.2.self_attn.k_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "qkv_proj"): (
            "model.layers.2.self_attn.qkv_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "v_proj"): (
            "model.layers.2.self_attn.v_proj",
            mock_layer.self_attn.query_key_value,
        ),
        (2, "o_proj"): ("model.layers.2.self_attn.o_proj", mock_layer.self_attn.o_proj),
    }
    assert result == expected


def test_get_mlp_weights_gemma_compatibility():
    mock_layer = Mock()
    mock_layer.mlp.gate_proj = Mock()
    mock_layer.mlp.up_proj = Mock()
    mock_layer.mlp.down_proj = Mock()
    # ensure that the mock_layer.mlp.gate_up_proj attribute does not exist.
    # This is necessary because the use of `Mock` automatically creates any
    # attributes that are accessed, even if they don't exist in the actual
    # implementation. If `gate_up_proj` were created, `get_mlp_weights` might
    # follow the wrong execution path and return an incorrect result.
    del mock_layer.mlp.gate_up_proj

    result = get_mlp_weights(3, mock_layer)

    expected = {
        (3, "c_fc"): ("model.layers.3.mlp.c_fc", mock_layer.mlp.c_fc),
        (3, "c_proj"): ("model.layers.3.mlp.c_proj", mock_layer.mlp.c_proj),
        (3, "gate_proj"): ("model.layers.3.mlp.gate_proj", mock_layer.mlp.gate_proj),
        (3, "up_proj"): ("model.layers.3.mlp.up_proj", mock_layer.mlp.up_proj),
        (3, "down_proj"): ("model.layers.3.mlp.down_proj", mock_layer.mlp.down_proj),
    }
    assert result == expected
text-generation-inference/server/tests/utils/test_adapter.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_adapter.py", "repo_id": "text-generation-inference", "token_count": 4022 }
314
# Backend dispatch for flash/paged attention: pick the implementation that
# matches the detected accelerator (CUDA, ROCm, or Intel IPEX) and re-export
# a uniform interface from this package.
import os

from text_generation_server.utils.import_utils import SYSTEM

from .common import Seqlen

# Explicit opt-out: USE_FLASH_ATTENTION=false makes importing this package
# fail, signalling that flash/paged attention is unavailable.
if os.getenv("USE_FLASH_ATTENTION", "").lower() == "false":
    raise ImportError("`USE_FLASH_ATTENTION` is false.")
# Each backend module exports the same three names, so callers never need
# to know which system they run on.
if SYSTEM == "cuda":
    from .cuda import (
        SUPPORTS_WINDOWING,
        attention,
        paged_attention,
    )
elif SYSTEM == "rocm":
    from .rocm import (
        SUPPORTS_WINDOWING,
        attention,
        paged_attention,
    )
elif SYSTEM == "ipex":
    from .ipex import (
        SUPPORTS_WINDOWING,
        attention,
        paged_attention,
    )
else:
    raise ImportError(f"System {SYSTEM} doesn't support flash/paged attention")

# KVCache needs `reshape_and_cache`, so ensure that it is defined already.
from .kv_cache import KVCache, get_kv_scales

__all__ = [
    "attention",
    "get_kv_scales",
    "paged_attention",
    "SUPPORTS_WINDOWING",
    "KVCache",
    "Seqlen",
]
text-generation-inference/server/text_generation_server/layers/attention/__init__.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/attention/__init__.py", "repo_id": "text-generation-inference", "token_count": 404 }
315
import torch
from text_generation_server.utils.import_utils import SYSTEM
from torch.nn import functional as F

import os

if SYSTEM == "rocm":
    # Skinny-gemm custom kernels are opt-out via ROCM_USE_SKINNY_GEMM; they
    # target GEMMs with very few rows ("skinny" inputs).
    ROCM_USE_SKINNY_GEMM = os.getenv("ROCM_USE_SKINNY_GEMM", "True").lower() in (
        "true",
        "1",
    )

    if ROCM_USE_SKINNY_GEMM:
        try:
            import vllm._custom_ops as ops
        except Exception as e:
            raise ImportError(
                f"Could not load `vllm._custom_ops` for ROCm skinny gemm. Full error: {e}"
            )


class FastLinear(torch.nn.Module):
    """Plain linear layer over pre-loaded, frozen weight/bias tensors."""

    def __init__(
        self,
        weight,
        bias,
    ) -> None:
        super().__init__()
        # Parameters are frozen: this layer is for inference-time serving.
        self.weight = torch.nn.Parameter(weight, requires_grad=False)
        if bias is not None:
            self.bias = torch.nn.Parameter(bias, requires_grad=False)
        else:
            self.bias = None

    @classmethod
    def load(cls, config, prefix: str, weights, bias: bool):
        """Construct the layer from `weights`, reading tensors under `prefix`."""
        weight = weights.get_tensor(f"{prefix}.weight")
        if bias:
            bias = weights.get_tensor(f"{prefix}.bias")
        else:
            bias = None
        return cls(weight, bias)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.linear(input, self.weight, self.bias)


class FastLinearROCm(torch.nn.Module):
    """Linear layer that dispatches to ROCm skinny-gemm kernels when the
    input shape qualifies, falling back to F.linear otherwise."""

    def __init__(
        self,
        weight,
        bias,
    ) -> None:
        super().__init__()
        self.weight = torch.nn.Parameter(weight)
        if bias is not None:
            self.bias = torch.nn.Parameter(bias)
        else:
            self.bias = None

        # CU count is a kernel launch parameter for wvSpltK.
        self.cu_count = torch.cuda.get_device_properties(
            device="cuda"
        ).multi_processor_count
        # Skinny gemm is skipped on RDNA ("gfx1*") architectures.
        self.use_skinny_gemm = (
            ROCM_USE_SKINNY_GEMM
            and "gfx1" not in torch.cuda.get_device_properties("cuda").gcnArchName
        )

    @classmethod
    def load(cls, config, prefix: str, weights, bias: bool):
        """Construct the layer from `weights`, reading tensors under `prefix`."""
        weight = weights.get_tensor(f"{prefix}.weight")
        if bias:
            bias = weights.get_tensor(f"{prefix}.bias")
        else:
            bias = None
        return cls(weight, bias)

    def forward(self, inp: torch.Tensor) -> torch.Tensor:
        weight = self.weight
        bias = self.bias

        if (
            self.use_skinny_gemm
            and inp.dtype == torch.float16
            and inp.shape[-1] % 8 == 0
        ):
            batched = False
            inp_shape = inp.shape

            if inp.dim() == 3:
                # Flatten (batch, seq, hidden) -> (batch*seq, hidden) for the kernels.
                inp = inp.view(-1, inp_shape[-1])
                batched = True

            # NOTE(review): for 3-D inputs n/k are taken from the pre-flatten
            # shape (batch, seq), not the flattened row count — confirm the
            # kernel dispatch thresholds intend this.
            m, n, k = weight.shape[0], inp_shape[0], inp_shape[1]
            if m > 8 and n <= 4:
                out = torch.empty(
                    inp_shape[0], weight.shape[0], dtype=inp.dtype, device=weight.device
                )
                ops.wvSpltK(weight, inp, out, n, self.cu_count)
            elif m % 4 == 0 and n == 1 and k <= 8192:
                out = torch.empty(
                    inp_shape[0], weight.shape[0], dtype=inp.dtype, device=weight.device
                )
                ops.LLMM1(weight, inp, out, 4)
            else:
                out = F.linear(inp, weight)

            if batched:
                # BUG FIX: Tensor.view returns a new tensor and is not
                # in-place; the original discarded its result, so 3-D inputs
                # came back 2-D. Rebind `out` to the reshaped view.
                out = out.view(*inp_shape[:-1], out.shape[-1])

            if bias is not None:
                out = out + bias

            return out
        return F.linear(inp, self.weight, self.bias)


def get_linear(weight, bias):
    """Wrap a bare weight tensor (or quantizer-provided object) in a linear layer."""
    # Weights that are loaded through methods that are not
    # quantization-aware are still bare tensors. We may want
    # to change this in the future.
    if isinstance(weight, torch.Tensor):
        if SYSTEM == "rocm":
            return FastLinearROCm(weight, bias)
        else:
            return FastLinear(weight, bias)

    return weight.get_linear(bias)
text-generation-inference/server/text_generation_server/layers/linear.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/linear.py", "repo_id": "text-generation-inference", "token_count": 1954 }
316
# ESLint configuration for the TypeScript Node bindings.
# Rule severities: 0 = off, 1 = warn, 2 = error.
parser: '@typescript-eslint/parser'
parserOptions:
  ecmaFeatures:
    jsx: true
  ecmaVersion: latest
  sourceType: module
  # Required for type-aware rules such as no-floating-promises below.
  project: ./tsconfig.json
env:
  browser: true
  es6: true
  node: true
  jest: true
ignorePatterns: ['index.js', 'target/']
plugins:
  - import
  - '@typescript-eslint'
extends:
  - eslint:recommended
  - plugin:prettier/recommended
rules:
  # 0 = off, 1 = warn, 2 = error
  'space-before-function-paren': 0
  'no-useless-constructor': 0
  'no-undef': 2
  'no-console': [2, { allow: ['error', 'warn', 'info', 'assert'] }]
  'comma-dangle': ['error', 'only-multiline']
  'no-unused-vars': 0
  'no-var': 2
  'one-var-declaration-per-line': 2
  'prefer-const': 2
  'no-const-assign': 2
  'no-duplicate-imports': 2
  'no-use-before-define': [2, { 'functions': false, 'classes': false }]
  'eqeqeq': [2, 'always', { 'null': 'ignore' }]
  'no-case-declarations': 0
  'no-restricted-syntax':
    [
      2,
      {
        'selector': 'BinaryExpression[operator=/(==|===|!=|!==)/][left.raw=true], BinaryExpression[operator=/(==|===|!=|!==)/][right.raw=true]',
        'message': Don't compare for equality against boolean literals,
      },
    ]
  # https://github.com/benmosher/eslint-plugin-import/pull/334
  'import/no-duplicates': 2
  'import/first': 2
  'import/newline-after-import': 2
  'import/order':
    [
      2,
      {
        'newlines-between': 'always',
        'alphabetize': { 'order': 'asc' },
        'groups': ['builtin', 'external', 'internal', 'parent', 'sibling', 'index'],
      },
    ]
# TypeScript-specific overrides: relax base rules that TS handles itself
# and enable the @typescript-eslint equivalents.
overrides:
  - files:
      - ./**/*{.ts,.tsx}
    rules:
      'no-unused-vars': [2, { varsIgnorePattern: '^_', argsIgnorePattern: '^_', ignoreRestSiblings: true }]
      'no-undef': 0
      # TypeScript declare merge
      'no-redeclare': 0
      'no-useless-constructor': 0
      'no-dupe-class-members': 0
      'no-case-declarations': 0
      'no-duplicate-imports': 0
      # TypeScript Interface and Type
      'no-use-before-define': 0
      '@typescript-eslint/adjacent-overload-signatures': 2
      '@typescript-eslint/await-thenable': 2
      '@typescript-eslint/consistent-type-assertions': 2
      '@typescript-eslint/ban-types':
        [
          'error',
          {
            'types': {
              'String': { 'message': 'Use string instead', 'fixWith': 'string' },
              'Number': { 'message': 'Use number instead', 'fixWith': 'number' },
              'Boolean': { 'message': 'Use boolean instead', 'fixWith': 'boolean' },
              'Function': { 'message': 'Use explicit type instead' },
            },
          },
        ]
      '@typescript-eslint/explicit-member-accessibility':
        [
          'error',
          {
            accessibility: 'explicit',
            overrides: {
              accessors: 'no-public',
              constructors: 'no-public',
              methods: 'no-public',
              properties: 'no-public',
              parameterProperties: 'explicit',
            },
          },
        ]
      '@typescript-eslint/method-signature-style': 2
      '@typescript-eslint/no-floating-promises': 2
      '@typescript-eslint/no-implied-eval': 2
      '@typescript-eslint/no-for-in-array': 2
      '@typescript-eslint/no-inferrable-types': 2
      '@typescript-eslint/no-invalid-void-type': 2
      '@typescript-eslint/no-misused-new': 2
      '@typescript-eslint/no-misused-promises': 2
      '@typescript-eslint/no-namespace': 2
      '@typescript-eslint/no-non-null-asserted-optional-chain': 2
      '@typescript-eslint/no-throw-literal': 2
      '@typescript-eslint/no-unnecessary-boolean-literal-compare': 2
      '@typescript-eslint/prefer-for-of': 2
      '@typescript-eslint/prefer-nullish-coalescing': 2
      '@typescript-eslint/switch-exhaustiveness-check': 2
      '@typescript-eslint/prefer-optional-chain': 2
      '@typescript-eslint/prefer-readonly': 2
      '@typescript-eslint/prefer-string-starts-ends-with': 0
      '@typescript-eslint/no-array-constructor': 2
      '@typescript-eslint/require-await': 2
      '@typescript-eslint/return-await': 2
      '@typescript-eslint/ban-ts-comment':
        [2, { 'ts-expect-error': false, 'ts-ignore': true, 'ts-nocheck': true, 'ts-check': false }]
      '@typescript-eslint/naming-convention':
        [
          2,
          {
            selector: 'memberLike',
            format: ['camelCase', 'PascalCase'],
            modifiers: ['private'],
            leadingUnderscore: 'forbid',
          },
        ]
      '@typescript-eslint/no-unused-vars': [2, { varsIgnorePattern: '^_', argsIgnorePattern: '^_', ignoreRestSiblings: true }]
      '@typescript-eslint/member-ordering':
        [
          2,
          {
            default: [
              'public-static-field',
              'protected-static-field',
              'private-static-field',
              'public-static-method',
              'protected-static-method',
              'private-static-method',
              'public-instance-field',
              'protected-instance-field',
              'private-instance-field',
              'public-constructor',
              'protected-constructor',
              'private-constructor',
              'public-instance-method',
              'protected-instance-method',
              'private-instance-method',
            ],
          },
        ]
tokenizers/bindings/node/.eslintrc.yml/0
{ "file_path": "tokenizers/bindings/node/.eslintrc.yml", "repo_id": "tokenizers", "token_count": 2733 }
317
/* eslint-disable prettier/prettier */ // For a detailed explanation regarding each configuration property, visit: // https://jestjs.io/docs/en/configuration.html module.exports = { // All imported modules in your tests should be mocked automatically // automock: false, // Stop running tests after `n` failures // bail: 0, // Respect "browser" field in package.json when resolving modules // browser: false, // The directory where Jest should store its cached dependency information // cacheDirectory: "/private/var/folders/y_/n6h0fkqn3m57bg_ktk25j7rm0000gn/T/jest_dx", // Automatically clear mock calls and instances between every test // clearMocks: false, // Indicates whether the coverage information should be collected while executing the test // collectCoverage: false, // An array of glob patterns indicating a set of files for which coverage information should be collected // collectCoverageFrom: null, // The directory where Jest should output its coverage files // coverageDirectory: null, // An array of regexp pattern strings used to skip coverage collection // coveragePathIgnorePatterns: [ // "/node_modules/" // ], // A list of reporter names that Jest uses when writing coverage reports // coverageReporters: [ // "json", // "text", // "lcov", // "clover" // ], // An object that configures minimum threshold enforcement for coverage results // coverageThreshold: null, // A path to a custom dependency extractor // dependencyExtractor: null, // Make calling deprecated APIs throw helpful error messages // errorOnDeprecated: false, // Force coverage collection from ignored files using an array of glob patterns // forceCoverageMatch: [], // A path to a module which exports an async function that is triggered once before all test suites // globalSetup: null, // A path to a module which exports an async function that is triggered once after all test suites // globalTeardown: null, // A set of global variables that need to be available in all test environments // globals: 
{}, // The maximum amount of workers used to run your tests. Can be specified as % or a number. E.g. maxWorkers: 10% will use 10% of your CPU amount + 1 as the maximum worker number. maxWorkers: 2 will use a maximum of 2 workers. // maxWorkers: "50%", // An array of directory names to be searched recursively up from the requiring module's location // moduleDirectories: [ // "node_modules" // ], // An array of file extensions your modules use // moduleFileExtensions: [ // "js", // "json", // "jsx", // "ts", // "tsx", // "node" // ], // A map from regular expressions to module names that allow to stub out resources with a single module // moduleNameMapper: {}, // An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader // modulePathIgnorePatterns: [], // Activates notifications for test results // notify: false, // An enum that specifies notification mode. Requires { notify: true } // notifyMode: "failure-change", // A preset that is used as a base for Jest's configuration preset: 'ts-jest', // Run tests from one or more projects // projects: null, // Use this configuration option to add custom reporters to Jest // reporters: undefined, // Automatically reset mock state between every test // resetMocks: false, // Reset the module registry before running each individual test // resetModules: false, // A path to a custom resolver // resolver: null, // Automatically restore mock state between every test // restoreMocks: false, // The root directory that Jest should scan for tests and modules within // rootDir: null, // A list of paths to directories that Jest should use to search for files in // roots: [ // "<rootDir>" // ], // Allows you to use a custom runner instead of Jest's default test runner // runner: "jest-runner", // The paths to modules that run some code to configure or set up the testing environment before each test // setupFiles: [], // A list of paths to modules that run some code to configure 
or set up the testing framework before each test // setupFilesAfterEnv: [], // A list of paths to snapshot serializer modules Jest should use for snapshot testing // snapshotSerializers: [], // The test environment that will be used for testing testEnvironment: 'node', // Options that will be passed to the testEnvironment // testEnvironmentOptions: {}, // Adds a location field to test results // testLocationInResults: false, // The glob patterns Jest uses to detect test files // testMatch: [ // "**/__tests__/**/*.[jt]s?(x)", // "**/?(*.)+(spec|test).[tj]s?(x)" // ], // An array of regexp pattern strings that are matched against all test paths, matched tests are skipped testPathIgnorePatterns: ['/node_modules/', '/dist/'], // The regexp pattern or array of patterns that Jest uses to detect test files // testRegex: [], // This option allows the use of a custom results processor // testResultsProcessor: null, // This option allows use of a custom test runner // testRunner: "jasmine2", // This option sets the URL for the jsdom environment. 
It is reflected in properties such as location.href // testURL: "http://localhost", // Setting this value to "fake" allows the use of fake timers for functions such as "setTimeout" // timers: "real", // A map from regular expressions to paths to transformers // transform: null, // An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation // transformIgnorePatterns: [ // "/node_modules/" // ], // An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them // unmockedModulePathPatterns: undefined, // Indicates whether each individual test should be reported during the run // verbose: null, // An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode watchPathIgnorePatterns: ['<rootDir>/node_modules/', '<rootDir>/native/', '<rootDir>/dist/', '<rootDir>/build/'], // Whether to use watchman for file crawling // watchman: true, }
tokenizers/bindings/node/jest.config.js/0
{ "file_path": "tokenizers/bindings/node/jest.config.js", "repo_id": "tokenizers", "token_count": 1715 }
318
# `tokenizers-darwin-arm64` This is the **aarch64-apple-darwin** binary for `tokenizers`
tokenizers/bindings/node/npm/darwin-arm64/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/darwin-arm64/README.md", "repo_id": "tokenizers", "token_count": 33 }
319
# `tokenizers-win32-arm64-msvc` This is the **aarch64-pc-windows-msvc** binary for `tokenizers`
tokenizers/bindings/node/npm/win32-arm64-msvc/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/win32-arm64-msvc/README.md", "repo_id": "tokenizers", "token_count": 38 }
320
//! Declares the public task submodules of this crate section.

pub mod models;
pub mod tokenizer;
tokenizers/bindings/node/src/tasks/mod.rs/0
{ "file_path": "tokenizers/bindings/node/src/tasks/mod.rs", "repo_id": "tokenizers", "token_count": 11 }
321
"""Throughput benchmark comparing tiktoken against the `tokenizers` bindings
on batches drawn from the XNLI dataset, across thread counts."""

import os
import time
import argparse
from datasets import load_dataset
from tiktoken.load import load_tiktoken_bpe
import tiktoken
from tokenizers import Tokenizer
from huggingface_hub import hf_hub_download
from typing import Tuple, List
from multiprocessing import Process

MODEL_ID = "meta-llama/Meta-Llama-3.1-8B"
DATASET = "facebook/xnli"
DATASET_CONFIG = "all_languages"
# Powers of two up to the machine's CPU count.
DEFAULT_THREADS = [2**i for i in range(8) if 2**i <= os.cpu_count()]


def format_byte_size(num_bytes: int) -> Tuple[str, str]:
    """Convert bytes to a human-readable format (KB, MB, GB).

    Returns a `(formatted_string, unit)` tuple.
    """
    num_bytes_f = float(num_bytes)
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if num_bytes_f < 1024:
            return f"{num_bytes_f:.2f} {unit}", unit
        num_bytes_f /= 1024
    return f"{num_bytes_f:.2f} PB", "PB"


def benchmark_batch(
    model: str, documents: List[str], num_threads: int, document_length: float
) -> None:
    """Encode `documents` with both tiktoken and `tokenizers`, printing MB/s.

    Must run in a fresh process: RAYON_NUM_THREADS is read once when the
    tokenizers thread pool starts, so it is set before any encoding happens.
    """
    os.environ["RAYON_NUM_THREADS"] = str(num_threads)
    num_bytes = sum(map(len, map(str.encode, documents)))
    readable_size, unit = format_byte_size(num_bytes)
    # FIX: plain strings (no placeholders) should not be f-strings.
    print("==============")
    print(
        f"num_threads: {num_threads}, data size: {readable_size}, documents: {len(documents)} Avg Length: {document_length:.0f}"
    )
    filename = hf_hub_download(MODEL_ID, "original/tokenizer.model")
    mergeable_ranks = load_tiktoken_bpe(filename)
    pat_str = r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"
    num_reserved_special_tokens = 256
    special_tokens = [
        "<|begin_of_text|>",
        "<|end_of_text|>",
        "<|reserved_special_token_0|>",
        "<|reserved_special_token_1|>",
        "<|reserved_special_token_2|>",
        "<|reserved_special_token_3|>",
        "<|start_header_id|>",
        "<|end_header_id|>",
        "<|reserved_special_token_4|>",
        "<|eot_id|>",  # end of turn
    ] + [
        f"<|reserved_special_token_{i}|>"
        for i in range(5, num_reserved_special_tokens - 5)
    ]
    num_base_tokens = len(mergeable_ranks)
    special_tokens = {
        token: num_base_tokens + i for i, token in enumerate(special_tokens)
    }

    enc = tiktoken.Encoding(
        name=model,
        pat_str=pat_str,
        mergeable_ranks=mergeable_ranks,
        special_tokens=special_tokens,
    )

    out = enc.encode("This is a test")

    hf_enc = Tokenizer.from_pretrained(model)
    out2 = hf_enc.encode("This is a test", add_special_tokens=False).ids

    # Both tokenizers must agree before timing anything.
    assert out == out2, "sanity check"

    start = time.perf_counter_ns()
    enc.encode_ordinary_batch(documents, num_threads=num_threads)
    end = time.perf_counter_ns()

    readable_size, unit = format_byte_size(num_bytes / (end - start) * 1e9)
    print(f"tiktoken \t{readable_size} / s")

    start = time.perf_counter_ns()
    hf_enc.encode_batch_fast(documents)
    end = time.perf_counter_ns()
    readable_size, unit = format_byte_size(num_bytes / (end - start) * 1e9)
    print(f"huggingface \t{readable_size} / s")


def test(model: str, dataset: str, dataset_config: str, threads: List[int]):
    """Run the benchmark matrix over thread counts and input-length configs."""
    dataset_xnli = load_dataset(dataset, dataset_config)

    # (document count, fuse into one string, concatenate all languages)
    input_lengths = [(10, False, True), (10_000, False, True), (10_000, False, False)]

    for num_threads in threads:
        for length, fuse, long in input_lengths:
            documents = []
            for i, item in enumerate(dataset_xnli["train"]):
                if i >= length:
                    break
                if long:
                    documents.append("".join(item["premise"].values()))
                else:
                    documents.append(item["premise"]["en"])
            if fuse:
                documents = ["".join(documents)]
            document_length = sum(len(d) for d in documents) / len(documents)

            # Rayon thread pool is global to a process, we need to launch
            # separate processes in order to accurately use the correct number of threads.
            # Otherwise, we're simply running tokenizers in whatever tests comes first.
            # tokenizers does NOT provide a method to change the number of threads during
            # runtime.
            p = Process(
                target=benchmark_batch,
                args=(model, documents, num_threads, document_length),
            )
            p.start()
            p.join()


def main():
    parser = argparse.ArgumentParser(
        prog="bench_tokenizer",
        description="Getting a feel for speed when tokenizing",
    )
    parser.add_argument("-m", "--model", default=MODEL_ID, type=str)
    parser.add_argument("-d", "--dataset", default=DATASET, type=str)
    parser.add_argument("-ds", "--dataset-config", default=DATASET_CONFIG, type=str)
    parser.add_argument("-t", "--threads", nargs="+", default=DEFAULT_THREADS, type=int)
    args = parser.parse_args()
    test(args.model, args.dataset, args.dataset_config, args.threads)


# Call the function to run the benchmark
if __name__ == "__main__":
    main()
tokenizers/bindings/python/benches/test_tiktoken.py/0
{ "file_path": "tokenizers/bindings/python/benches/test_tiktoken.py", "repo_id": "tokenizers", "token_count": 2288 }
322
from typing import Dict, Iterator, List, Optional, Tuple, Union

from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers
from ..models import BPE
from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str

from .base_tokenizer import BaseTokenizer


class CharBPETokenizer(BaseTokenizer):
    """Original BPE Tokenizer

    Represents the BPE algorithm, as introduced by Rico Sennrich
    (https://arxiv.org/abs/1508.07909)

    The default settings correspond to OpenAI GPT BPE tokenizers and differ from the original
    Sennrich subword-nmt implementation by the following options that you can deactivate:
        - adding a normalizer to clean up the text (deactivate with `bert_normalizer=False`) by:
            * removing any control characters and replacing all whitespaces by the classic one.
            * handle chinese chars by putting spaces around them.
            * strip all accents.
        - splitting on punctuation in addition to whitespaces (deactivate it with
          `split_on_whitespace_only=True`)
    """

    def __init__(
        self,
        vocab: Optional[Union[str, Dict[str, int]]] = None,
        merges: Optional[Union[str, List[Tuple[str, str]]]] = None,
        unk_token: Union[str, AddedToken] = "<unk>",
        suffix: str = "</w>",
        dropout: Optional[float] = None,
        lowercase: bool = False,
        unicode_normalizer: Optional[str] = None,
        bert_normalizer: bool = True,
        split_on_whitespace_only: bool = False,
    ):
        """Build a character-level BPE tokenizer.

        When both `vocab` and `merges` are provided (paths or in-memory
        structures) the underlying BPE model is fully initialized; otherwise
        an empty, trainable model is created.
        """
        if vocab is not None and merges is not None:
            tokenizer = Tokenizer(
                BPE(
                    vocab,
                    merges,
                    dropout=dropout,
                    unk_token=str(unk_token),
                    end_of_word_suffix=suffix,
                )
            )
        else:
            tokenizer = Tokenizer(BPE(unk_token=str(unk_token), dropout=dropout, end_of_word_suffix=suffix))

        # Register the unk token as special only if it exists in the vocabulary.
        if tokenizer.token_to_id(str(unk_token)) is not None:
            tokenizer.add_special_tokens([str(unk_token)])

        # Check for Unicode normalization first (before everything else)
        normalizers = []

        if unicode_normalizer:
            normalizers += [unicode_normalizer_from_str(unicode_normalizer)]

        if bert_normalizer:
            normalizers += [BertNormalizer(lowercase=False)]

        if lowercase:
            normalizers += [Lowercase()]

        # Create the normalizer structure: a Sequence when several normalizers
        # are stacked, the bare normalizer when there is exactly one.
        if len(normalizers) > 0:
            if len(normalizers) > 1:
                tokenizer.normalizer = Sequence(normalizers)
            else:
                tokenizer.normalizer = normalizers[0]

        if split_on_whitespace_only:
            tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
        else:
            tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()

        tokenizer.decoder = decoders.BPEDecoder(suffix=suffix)

        parameters = {
            "model": "BPE",
            "unk_token": unk_token,
            "suffix": suffix,
            "dropout": dropout,
            "lowercase": lowercase,
            "unicode_normalizer": unicode_normalizer,
            "bert_normalizer": bert_normalizer,
            "split_on_whitespace_only": split_on_whitespace_only,
        }

        super().__init__(tokenizer, parameters)

    @staticmethod
    def from_file(vocab_filename: str, merges_filename: str, **kwargs):
        """Instantiate from vocab/merges files on disk."""
        vocab, merges = BPE.read_file(vocab_filename, merges_filename)
        return CharBPETokenizer(vocab, merges, **kwargs)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        limit_alphabet: int = 1000,
        initial_alphabet: Optional[List[str]] = None,
        suffix: Optional[str] = "</w>",
        show_progress: bool = True,
    ):
        """Train the model using the given files"""
        # Avoid mutable default arguments (shared across calls); the effective
        # defaults are unchanged: special_tokens=["<unk>"], initial_alphabet=[].
        if special_tokens is None:
            special_tokens = ["<unk>"]
        if initial_alphabet is None:
            initial_alphabet = []

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            special_tokens=special_tokens,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            end_of_word_suffix=suffix,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 30000,
        min_frequency: int = 2,
        special_tokens: Optional[List[Union[str, AddedToken]]] = None,
        limit_alphabet: int = 1000,
        initial_alphabet: Optional[List[str]] = None,
        suffix: Optional[str] = "</w>",
        show_progress: bool = True,
        length: Optional[int] = None,
    ):
        """Train the model using the given iterator"""
        # Same mutable-default fix as in `train` above.
        if special_tokens is None:
            special_tokens = ["<unk>"]
        if initial_alphabet is None:
            initial_alphabet = []

        trainer = trainers.BpeTrainer(
            vocab_size=vocab_size,
            min_frequency=min_frequency,
            special_tokens=special_tokens,
            limit_alphabet=limit_alphabet,
            initial_alphabet=initial_alphabet,
            end_of_word_suffix=suffix,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(
            iterator,
            trainer=trainer,
            length=length,
        )
tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py", "repo_id": "tokenizers", "token_count": 2501 }
323
# Packaging metadata for the `tokenizers` Python bindings.
# Built with maturin (pyo3 extension module); see [build-system] below.
[project]
name = "tokenizers"
requires-python = ">=3.9"
authors = [
    { name = "Nicolas Patry", email = "patry.nicolas@protonmail.com" },
    { name = "Anthony Moi", email = "anthony@huggingface.co" },
]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "Intended Audience :: Education",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
    "Programming Language :: Python :: 3 :: Only",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]
keywords = ["NLP", "tokenizer", "BPE", "transformer", "deep learning"]
# description/license/readme/version are supplied by the build backend (maturin).
dynamic = ["description", "license", "readme", "version"]
dependencies = ["huggingface_hub>=0.16.4,<1.0"]

[project.urls]
Homepage = "https://github.com/huggingface/tokenizers"
Source = "https://github.com/huggingface/tokenizers"

[project.optional-dependencies]
testing = ["pytest", "requests", "numpy", "datasets", "black==22.3", "ruff"]
docs = ["sphinx", "sphinx_rtd_theme", "setuptools_rust"]
# `dev` is a convenience alias that pulls in the testing extras.
dev = ["tokenizers[testing]"]

[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"

[tool.maturin]
# Pure-Python sources live under py_src; the compiled extension is
# exposed as the submodule `tokenizers.tokenizers`.
python-source = "py_src"
module-name = "tokenizers.tokenizers"
bindings = "pyo3"
features = ["pyo3/extension-module"]

[tool.black]
line-length = 119
# NOTE(review): black targets "py35" while requires-python is ">=3.9" and ruff
# targets "py311" -- presumably stale; confirm and align these versions.
target-version = ["py35"]

[tool.ruff]
line-length = 119
target-version = "py311"
lint.ignore = [
    # a == None in tests vs is None.
    "E711",
    # a == False in tests vs is False.
    "E712",
    # try.. import except.. pattern without using the lib.
    "F401",
    # Raw type equality is required in asserts
    "E721",
    # Import order
    "E402",
    # Fixtures unused import
    "F811",
]
tokenizers/bindings/python/pyproject.toml/0
{ "file_path": "tokenizers/bindings/python/pyproject.toml", "repo_id": "tokenizers", "token_count": 684 }
324
// Python bindings for the `tokenizers` Trainer types.
//
// Each Rust trainer variant (BPE, WordPiece, WordLevel, Unigram) is exposed to
// Python as a subclass of the abstract `Trainer` base class. The shared state
// lives behind an `Arc<RwLock<TrainerWrapper>>` so the Python wrapper objects
// can be cheaply cloned while mutating one underlying trainer.
use std::sync::{Arc, RwLock};

use crate::models::PyModel;
use crate::tokenizer::PyAddedToken;
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use serde::{Deserialize, Serialize};
use tk::models::TrainerWrapper;
use tk::Trainer;
use tokenizers as tk;

/// Base class for all trainers
///
/// This class is not supposed to be instantiated directly. Instead, any implementation of a
/// Trainer will return an instance of this class when instantiated.
#[pyclass(module = "tokenizers.trainers", name = "Trainer", subclass)]
#[derive(Clone, Deserialize, Serialize)]
#[serde(transparent)]
pub struct PyTrainer {
    // Shared, lock-guarded handle to the concrete trainer; `serde(transparent)`
    // makes (de)serialization delegate directly to the wrapped trainer.
    pub trainer: Arc<RwLock<TrainerWrapper>>,
}

impl PyTrainer {
    #[cfg(test)]
    pub(crate) fn new(trainer: Arc<RwLock<TrainerWrapper>>) -> Self {
        PyTrainer { trainer }
    }

    // Re-wrap this base trainer as the concrete Python subclass matching the
    // wrapped variant, so Python sees e.g. a `BpeTrainer` instead of `Trainer`.
    pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> {
        let base = self.clone();
        Ok(match *self.trainer.as_ref().read().unwrap() {
            TrainerWrapper::BpeTrainer(_) => Py::new(py, (PyBpeTrainer {}, base))?
                .into_pyobject(py)?
                .into_any()
                .into(),
            TrainerWrapper::WordPieceTrainer(_) => Py::new(py, (PyWordPieceTrainer {}, base))?
                .into_pyobject(py)?
                .into_any()
                .into(),
            TrainerWrapper::WordLevelTrainer(_) => Py::new(py, (PyWordLevelTrainer {}, base))?
                .into_pyobject(py)?
                .into_any()
                .into(),
            TrainerWrapper::UnigramTrainer(_) => Py::new(py, (PyUnigramTrainer {}, base))?
                .into_pyobject(py)?
                .into_any()
                .into(),
        })
    }
}

// Pickle support (__getstate__/__setstate__ via JSON) plus repr/str.
#[pymethods]
impl PyTrainer {
    fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
        let data = serde_json::to_string(&self.trainer).map_err(|e| {
            exceptions::PyException::new_err(format!(
                "Error while attempting to pickle PyTrainer: {e}"
            ))
        })?;
        Ok(PyBytes::new(py, data.as_bytes()).into())
    }

    fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
        match state.extract::<&[u8]>(py) {
            Ok(s) => {
                let unpickled = serde_json::from_slice(s).map_err(|e| {
                    exceptions::PyException::new_err(format!(
                        "Error while attempting to unpickle PyTrainer: {e}"
                    ))
                })?;
                self.trainer = unpickled;
                Ok(())
            }
            Err(e) => Err(e),
        }
    }

    fn __repr__(&self) -> PyResult<String> {
        crate::utils::serde_pyo3::repr(self)
            .map_err(|e| exceptions::PyException::new_err(e.to_string()))
    }

    fn __str__(&self) -> PyResult<String> {
        crate::utils::serde_pyo3::to_string(self)
            .map_err(|e| exceptions::PyException::new_err(e.to_string()))
    }
}

// Delegate the core `Trainer` trait through the lock to the wrapped trainer.
impl Trainer for PyTrainer {
    type Model = PyModel;

    fn should_show_progress(&self) -> bool {
        self.trainer.read().unwrap().should_show_progress()
    }

    fn train(&self, model: &mut PyModel) -> tk::Result<Vec<tk::AddedToken>> {
        self.trainer
            .read()
            .unwrap()
            .train(&mut model.model.write().unwrap())
    }

    fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()>
    where
        I: Iterator<Item = S> + Send,
        S: AsRef<str> + Send,
        F: Fn(&str) -> tk::Result<Vec<String>> + Sync,
    {
        self.trainer.write().unwrap().feed(iterator, process)
    }
}

impl<I> From<I> for PyTrainer
where
    I: Into<TrainerWrapper>,
{
    fn from(trainer: I) -> Self {
        PyTrainer {
            trainer: Arc::new(RwLock::new(trainer.into())),
        }
    }
}

// Read a field (or call a getter method) on the concrete trainer variant.
// `unreachable!()` is sound because each subclass is only ever constructed
// around its matching TrainerWrapper variant (see get_as_subtype / new).
macro_rules! getter {
    ($self: ident, $variant: ident, $($name: tt)+) => {{
        let super_ = $self.as_ref();
        if let TrainerWrapper::$variant(ref trainer) = *super_.trainer.read().unwrap() {
            trainer.$($name)+
        } else {
            unreachable!()
        }
    }};
}

// Write a field on the concrete variant; the `@name` form calls a setter
// method instead of assigning a field (used by WordPieceTrainer).
macro_rules! setter {
    ($self: ident, $variant: ident, $name: ident, $value: expr) => {{
        let super_ = $self.as_ref();
        if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() {
            trainer.$name = $value;
        }
    }};
    ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{
        let super_ = $self.as_ref();
        if let TrainerWrapper::$variant(ref mut trainer) = *super_.trainer.write().unwrap() {
            trainer.$name($value);
        }
    }};
}

/// Trainer capable of training a BPE model
///
/// Args:
///     vocab_size (:obj:`int`, `optional`):
///         The size of the final vocabulary, including all tokens and alphabet.
///
///     min_frequency (:obj:`int`, `optional`):
///         The minimum frequency a pair should have in order to be merged.
///
///     show_progress (:obj:`bool`, `optional`):
///         Whether to show progress bars while training.
///
///     special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
///         A list of special tokens the model should know of.
///
///     limit_alphabet (:obj:`int`, `optional`):
///         The maximum different characters to keep in the alphabet.
///
///     initial_alphabet (:obj:`List[str]`, `optional`):
///         A list of characters to include in the initial alphabet, even
///         if not seen in the training dataset.
///         If the strings contain more than one character, only the first one
///         is kept.
///
///     continuing_subword_prefix (:obj:`str`, `optional`):
///         A prefix to be used for every subword that is not a beginning-of-word.
///
///     end_of_word_suffix (:obj:`str`, `optional`):
///         A suffix to be used for every subword that is a end-of-word.
///
///     max_token_length (:obj:`int`, `optional`):
///         Prevents creating tokens longer than the specified size.
///         This can help with reducing polluting your vocabulary with
///         highly repetitive tokens like `======` for wikipedia
///
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "BpeTrainer")]
pub struct PyBpeTrainer {}

#[pymethods]
impl PyBpeTrainer {
    #[getter]
    fn get_vocab_size(self_: PyRef<Self>) -> usize {
        getter!(self_, BpeTrainer, vocab_size)
    }

    #[setter]
    fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) {
        setter!(self_, BpeTrainer, vocab_size, vocab_size);
    }

    #[getter]
    fn get_min_frequency(self_: PyRef<Self>) -> u64 {
        getter!(self_, BpeTrainer, min_frequency)
    }

    #[setter]
    fn set_min_frequency(self_: PyRef<Self>, freq: u64) {
        setter!(self_, BpeTrainer, min_frequency, freq);
    }

    #[getter]
    fn get_show_progress(self_: PyRef<Self>) -> bool {
        getter!(self_, BpeTrainer, show_progress)
    }

    #[setter]
    fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
        setter!(self_, BpeTrainer, show_progress, show_progress);
    }

    #[getter]
    fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
        getter!(
            self_,
            BpeTrainer,
            special_tokens
                .iter()
                .map(|tok| tok.clone().into())
                .collect()
        )
    }

    #[setter]
    fn set_special_tokens(self_: PyRef<Self>, special_tokens: &Bound<'_, PyList>) -> PyResult<()> {
        // Accept either plain strings or AddedToken objects; either way the
        // token is forced to be `special`.
        setter!(
            self_,
            BpeTrainer,
            special_tokens,
            special_tokens
                .into_iter()
                .map(|token| {
                    if let Ok(content) = token.extract::<String>() {
                        Ok(tk::tokenizer::AddedToken::from(content, true))
                    } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
                        token.special = true;
                        Ok(token.get_token())
                    } else {
                        Err(exceptions::PyTypeError::new_err(
                            "Special tokens must be a List[Union[str, AddedToken]]",
                        ))
                    }
                })
                .collect::<PyResult<Vec<_>>>()?
        );
        Ok(())
    }

    #[getter]
    fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> {
        getter!(self_, BpeTrainer, limit_alphabet)
    }

    #[setter]
    fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) {
        setter!(self_, BpeTrainer, limit_alphabet, limit);
    }

    #[getter]
    fn get_max_token_length(self_: PyRef<Self>) -> Option<usize> {
        getter!(self_, BpeTrainer, max_token_length)
    }

    #[setter]
    fn set_max_token_length(self_: PyRef<Self>, limit: Option<usize>) {
        setter!(self_, BpeTrainer, max_token_length, limit);
    }

    #[getter]
    fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> {
        getter!(
            self_,
            BpeTrainer,
            initial_alphabet.iter().map(|c| c.to_string()).collect()
        )
    }

    #[setter]
    fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<char>) {
        setter!(
            self_,
            BpeTrainer,
            initial_alphabet,
            alphabet.into_iter().collect()
        );
    }

    #[getter]
    fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, BpeTrainer, continuing_subword_prefix.clone())
    }

    #[setter]
    fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) {
        setter!(self_, BpeTrainer, continuing_subword_prefix, prefix);
    }

    #[getter]
    fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, BpeTrainer, end_of_word_suffix.clone())
    }

    #[setter]
    fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) {
        setter!(self_, BpeTrainer, end_of_word_suffix, suffix);
    }

    // kwargs-driven constructor; unknown keys are reported on stdout and ignored.
    #[new]
    #[pyo3(signature = (**kwargs), text_signature = None)]
    pub fn new(kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<(Self, PyTrainer)> {
        let mut builder = tk::models::bpe::BpeTrainer::builder();
        if let Some(kwargs) = kwargs {
            for (key, val) in kwargs {
                let key: String = key.extract()?;
                match key.as_ref() {
                    "vocab_size" => builder = builder.vocab_size(val.extract()?),
                    "min_frequency" => builder = builder.min_frequency(val.extract()?),
                    "show_progress" => builder = builder.show_progress(val.extract()?),
                    "special_tokens" => {
                        builder = builder.special_tokens(
                            val.downcast::<PyList>()?
                                .into_iter()
                                .map(|token| {
                                    if let Ok(content) = token.extract::<String>() {
                                        Ok(PyAddedToken::from(content, Some(true)).get_token())
                                    } else if let Ok(mut token) =
                                        token.extract::<PyRefMut<PyAddedToken>>()
                                    {
                                        token.special = true;
                                        Ok(token.get_token())
                                    } else {
                                        Err(exceptions::PyTypeError::new_err(
                                            "special_tokens must be a List[Union[str, AddedToken]]",
                                        ))
                                    }
                                })
                                .collect::<PyResult<Vec<_>>>()?,
                        );
                    }
                    "limit_alphabet" => builder = builder.limit_alphabet(val.extract()?),
                    "max_token_length" => builder = builder.max_token_length(val.extract()?),
                    "initial_alphabet" => {
                        // Only the first char of each provided string is kept.
                        let alphabet: Vec<String> = val.extract()?;
                        builder = builder.initial_alphabet(
                            alphabet
                                .into_iter()
                                .filter_map(|s| s.chars().next())
                                .collect(),
                        );
                    }
                    "continuing_subword_prefix" => {
                        builder = builder.continuing_subword_prefix(val.extract()?)
                    }
                    "end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?),
                    _ => println!("Ignored unknown kwargs option {key}"),
                };
            }
        }
        Ok((PyBpeTrainer {}, builder.build().into()))
    }
}

/// Trainer capable of training a WordPiece model
///
/// Args:
///     vocab_size (:obj:`int`, `optional`):
///         The size of the final vocabulary, including all tokens and alphabet.
///
///     min_frequency (:obj:`int`, `optional`):
///         The minimum frequency a pair should have in order to be merged.
///
///     show_progress (:obj:`bool`, `optional`):
///         Whether to show progress bars while training.
///
///     special_tokens (:obj:`List[Union[str, AddedToken]]`, `optional`):
///         A list of special tokens the model should know of.
///
///     limit_alphabet (:obj:`int`, `optional`):
///         The maximum different characters to keep in the alphabet.
///
///     initial_alphabet (:obj:`List[str]`, `optional`):
///         A list of characters to include in the initial alphabet, even
///         if not seen in the training dataset.
///         If the strings contain more than one character, only the first one
///         is kept.
///
///     continuing_subword_prefix (:obj:`str`, `optional`):
///         A prefix to be used for every subword that is not a beginning-of-word.
///
///     end_of_word_suffix (:obj:`str`, `optional`):
///         A suffix to be used for every subword that is a end-of-word.
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordPieceTrainer")]
pub struct PyWordPieceTrainer {}

// NOTE: unlike BpeTrainer, WordPieceTrainer exposes accessor *methods* on the
// Rust side, hence the `field()` getter form and `@set_field` setter form.
#[pymethods]
impl PyWordPieceTrainer {
    #[getter]
    fn get_vocab_size(self_: PyRef<Self>) -> usize {
        getter!(self_, WordPieceTrainer, vocab_size())
    }

    #[setter]
    fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) {
        setter!(self_, WordPieceTrainer, @set_vocab_size, vocab_size);
    }

    #[getter]
    fn get_min_frequency(self_: PyRef<Self>) -> u64 {
        getter!(self_, WordPieceTrainer, min_frequency())
    }

    #[setter]
    fn set_min_frequency(self_: PyRef<Self>, freq: u64) {
        setter!(self_, WordPieceTrainer, @set_min_frequency, freq);
    }

    #[getter]
    fn get_show_progress(self_: PyRef<Self>) -> bool {
        getter!(self_, WordPieceTrainer, show_progress())
    }

    #[setter]
    fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
        setter!(self_, WordPieceTrainer, @set_show_progress, show_progress);
    }

    #[getter]
    fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
        getter!(
            self_,
            WordPieceTrainer,
            special_tokens()
                .iter()
                .map(|tok| tok.clone().into())
                .collect()
        )
    }

    #[setter]
    fn set_special_tokens(self_: PyRef<Self>, special_tokens: &Bound<'_, PyList>) -> PyResult<()> {
        setter!(
            self_,
            WordPieceTrainer,
            @set_special_tokens,
            special_tokens
                .into_iter()
                .map(|token| {
                    if let Ok(content) = token.extract::<String>() {
                        Ok(tk::tokenizer::AddedToken::from(content, true))
                    } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
                        token.special = true;
                        Ok(token.get_token())
                    } else {
                        Err(exceptions::PyTypeError::new_err(
                            "Special tokens must be a List[Union[str, AddedToken]]",
                        ))
                    }
                })
                .collect::<PyResult<Vec<_>>>()?
        );
        Ok(())
    }

    #[getter]
    fn get_limit_alphabet(self_: PyRef<Self>) -> Option<usize> {
        getter!(self_, WordPieceTrainer, limit_alphabet())
    }

    #[setter]
    fn set_limit_alphabet(self_: PyRef<Self>, limit: Option<usize>) {
        setter!(self_, WordPieceTrainer, @set_limit_alphabet, limit);
    }

    #[getter]
    fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> {
        getter!(
            self_,
            WordPieceTrainer,
            initial_alphabet().iter().map(|c| c.to_string()).collect()
        )
    }

    #[setter]
    fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<char>) {
        setter!(
            self_,
            WordPieceTrainer,
            @set_initial_alphabet,
            alphabet.into_iter().collect()
        );
    }

    #[getter]
    fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, WordPieceTrainer, continuing_subword_prefix().clone())
    }

    #[setter]
    fn set_continuing_subword_prefix(self_: PyRef<Self>, prefix: Option<String>) {
        setter!(self_, WordPieceTrainer, @set_continuing_subword_prefix, prefix);
    }

    #[getter]
    fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> {
        getter!(self_, WordPieceTrainer, end_of_word_suffix().clone())
    }

    #[setter]
    fn set_end_of_word_suffix(self_: PyRef<Self>, suffix: Option<String>) {
        setter!(self_, WordPieceTrainer, @set_end_of_word_suffix, suffix);
    }

    #[new]
    #[pyo3(
        signature = (** kwargs),
        text_signature = "(self, vocab_size=30000, min_frequency=0, show_progress=True, special_tokens=[], limit_alphabet=None, initial_alphabet= [],continuing_subword_prefix=\"##\", end_of_word_suffix=None)"
    )]
    pub fn new(kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<(Self, PyTrainer)> {
        let mut builder = tk::models::wordpiece::WordPieceTrainer::builder();
        if let Some(kwargs) = kwargs {
            for (key, val) in kwargs {
                let key: String = key.extract()?;
                match key.as_ref() {
                    "vocab_size" => builder = builder.vocab_size(val.extract()?),
                    "min_frequency" => builder = builder.min_frequency(val.extract()?),
                    "show_progress" => builder = builder.show_progress(val.extract()?),
                    "special_tokens" => {
                        builder = builder.special_tokens(
                            val.downcast::<PyList>()?
                                .into_iter()
                                .map(|token| {
                                    if let Ok(content) = token.extract::<String>() {
                                        Ok(PyAddedToken::from(content, Some(true)).get_token())
                                    } else if let Ok(mut token) =
                                        token.extract::<PyRefMut<PyAddedToken>>()
                                    {
                                        token.special = true;
                                        Ok(token.get_token())
                                    } else {
                                        Err(exceptions::PyTypeError::new_err(
                                            "special_tokens must be a List[Union[str, AddedToken]]",
                                        ))
                                    }
                                })
                                .collect::<PyResult<Vec<_>>>()?,
                        );
                    }
                    "limit_alphabet" => builder = builder.limit_alphabet(val.extract()?),
                    "initial_alphabet" => {
                        let alphabet: Vec<String> = val.extract()?;
                        builder = builder.initial_alphabet(
                            alphabet
                                .into_iter()
                                .filter_map(|s| s.chars().next())
                                .collect(),
                        );
                    }
                    "continuing_subword_prefix" => {
                        builder = builder.continuing_subword_prefix(val.extract()?)
                    }
                    "end_of_word_suffix" => builder = builder.end_of_word_suffix(val.extract()?),
                    _ => println!("Ignored unknown kwargs option {key}"),
                };
            }
        }
        Ok((PyWordPieceTrainer {}, builder.build().into()))
    }
}

/// Trainer capable of training a WorldLevel model
///
/// Args:
///     vocab_size (:obj:`int`, `optional`):
///         The size of the final vocabulary, including all tokens and alphabet.
///
///     min_frequency (:obj:`int`, `optional`):
///         The minimum frequency a pair should have in order to be merged.
///
///     show_progress (:obj:`bool`, `optional`):
///         Whether to show progress bars while training.
///
///     special_tokens (:obj:`List[Union[str, AddedToken]]`):
///         A list of special tokens the model should know of.
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "WordLevelTrainer")]
pub struct PyWordLevelTrainer {}

#[pymethods]
impl PyWordLevelTrainer {
    #[getter]
    fn get_vocab_size(self_: PyRef<Self>) -> usize {
        getter!(self_, WordLevelTrainer, vocab_size)
    }

    #[setter]
    fn set_vocab_size(self_: PyRef<Self>, vocab_size: usize) {
        setter!(self_, WordLevelTrainer, vocab_size, vocab_size);
    }

    #[getter]
    fn get_min_frequency(self_: PyRef<Self>) -> u64 {
        getter!(self_, WordLevelTrainer, min_frequency)
    }

    #[setter]
    fn set_min_frequency(self_: PyRef<Self>, freq: u64) {
        setter!(self_, WordLevelTrainer, min_frequency, freq);
    }

    #[getter]
    fn get_show_progress(self_: PyRef<Self>) -> bool {
        getter!(self_, WordLevelTrainer, show_progress)
    }

    #[setter]
    fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
        setter!(self_, WordLevelTrainer, show_progress, show_progress);
    }

    #[getter]
    fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
        getter!(
            self_,
            WordLevelTrainer,
            special_tokens
                .iter()
                .map(|tok| tok.clone().into())
                .collect()
        )
    }

    #[setter]
    fn set_special_tokens(self_: PyRef<Self>, special_tokens: &Bound<'_, PyList>) -> PyResult<()> {
        setter!(
            self_,
            WordLevelTrainer,
            special_tokens,
            special_tokens
                .into_iter()
                .map(|token| {
                    if let Ok(content) = token.extract::<String>() {
                        Ok(tk::tokenizer::AddedToken::from(content, true))
                    } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
                        token.special = true;
                        Ok(token.get_token())
                    } else {
                        Err(exceptions::PyTypeError::new_err(
                            "Special tokens must be a List[Union[str, AddedToken]]",
                        ))
                    }
                })
                .collect::<PyResult<Vec<_>>>()?
        );
        Ok(())
    }

    #[new]
    #[pyo3(signature = (**kwargs), text_signature = None)]
    pub fn new(kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<(Self, PyTrainer)> {
        // NOTE: this builder mutates in place (statements), unlike the
        // move-style builders used by the BPE/WordPiece constructors above.
        let mut builder = tk::models::wordlevel::WordLevelTrainer::builder();

        if let Some(kwargs) = kwargs {
            for (key, val) in kwargs {
                let key: String = key.extract()?;
                match key.as_ref() {
                    "vocab_size" => {
                        builder.vocab_size(val.extract()?);
                    }
                    "min_frequency" => {
                        builder.min_frequency(val.extract()?);
                    }
                    "show_progress" => {
                        builder.show_progress(val.extract()?);
                    }
                    "special_tokens" => {
                        builder.special_tokens(
                            val.downcast::<PyList>()?
                                .into_iter()
                                .map(|token| {
                                    if let Ok(content) = token.extract::<String>() {
                                        Ok(PyAddedToken::from(content, Some(true)).get_token())
                                    } else if let Ok(mut token) =
                                        token.extract::<PyRefMut<PyAddedToken>>()
                                    {
                                        token.special = true;
                                        Ok(token.get_token())
                                    } else {
                                        Err(exceptions::PyTypeError::new_err(
                                            "special_tokens must be a List[Union[str, AddedToken]]",
                                        ))
                                    }
                                })
                                .collect::<PyResult<Vec<_>>>()?,
                        );
                    }
                    _ => println!("Ignored unknown kwargs option {key}"),
                }
            }
        }

        Ok((
            PyWordLevelTrainer {},
            builder
                .build()
                .expect("WordLevelTrainerBuilder cannot fail")
                .into(),
        ))
    }
}

/// Trainer capable of training a Unigram model
///
/// Args:
///     vocab_size (:obj:`int`):
///         The size of the final vocabulary, including all tokens and alphabet.
///
///     show_progress (:obj:`bool`):
///         Whether to show progress bars while training.
///
///     special_tokens (:obj:`List[Union[str, AddedToken]]`):
///         A list of special tokens the model should know of.
///
///     initial_alphabet (:obj:`List[str]`):
///         A list of characters to include in the initial alphabet, even
///         if not seen in the training dataset.
///         If the strings contain more than one character, only the first one
///         is kept.
///
///     shrinking_factor (:obj:`float`):
///         The shrinking factor used at each step of the training to prune the
///         vocabulary.
///
///     unk_token (:obj:`str`):
///         The token used for out-of-vocabulary tokens.
///
///     max_piece_length (:obj:`int`):
///         The maximum length of a given token.
///
///     n_sub_iterations (:obj:`int`):
///         The number of iterations of the EM algorithm to perform before
///         pruning the vocabulary.
#[pyclass(extends=PyTrainer, module = "tokenizers.trainers", name = "UnigramTrainer")]
pub struct PyUnigramTrainer {}

#[pymethods]
impl PyUnigramTrainer {
    #[getter]
    fn get_vocab_size(self_: PyRef<Self>) -> u32 {
        getter!(self_, UnigramTrainer, vocab_size)
    }

    #[setter]
    fn set_vocab_size(self_: PyRef<Self>, vocab_size: u32) {
        setter!(self_, UnigramTrainer, vocab_size, vocab_size);
    }

    #[getter]
    fn get_show_progress(self_: PyRef<Self>) -> bool {
        getter!(self_, UnigramTrainer, show_progress)
    }

    #[setter]
    fn set_show_progress(self_: PyRef<Self>, show_progress: bool) {
        setter!(self_, UnigramTrainer, show_progress, show_progress);
    }

    #[getter]
    fn get_special_tokens(self_: PyRef<Self>) -> Vec<PyAddedToken> {
        getter!(
            self_,
            UnigramTrainer,
            special_tokens
                .iter()
                .map(|tok| tok.clone().into())
                .collect()
        )
    }

    #[setter]
    fn set_special_tokens(self_: PyRef<Self>, special_tokens: &Bound<'_, PyList>) -> PyResult<()> {
        setter!(
            self_,
            UnigramTrainer,
            special_tokens,
            special_tokens
                .into_iter()
                .map(|token| {
                    if let Ok(content) = token.extract::<String>() {
                        Ok(tk::tokenizer::AddedToken::from(content, true))
                    } else if let Ok(mut token) = token.extract::<PyRefMut<PyAddedToken>>() {
                        token.special = true;
                        Ok(token.get_token())
                    } else {
                        Err(exceptions::PyTypeError::new_err(
                            "Special tokens must be a List[Union[str, AddedToken]]",
                        ))
                    }
                })
                .collect::<PyResult<Vec<_>>>()?
        );
        Ok(())
    }

    #[getter]
    fn get_initial_alphabet(self_: PyRef<Self>) -> Vec<String> {
        getter!(
            self_,
            UnigramTrainer,
            initial_alphabet.iter().map(|c| c.to_string()).collect()
        )
    }

    #[setter]
    fn set_initial_alphabet(self_: PyRef<Self>, alphabet: Vec<char>) {
        setter!(
            self_,
            UnigramTrainer,
            initial_alphabet,
            alphabet.into_iter().collect()
        );
    }

    #[new]
    #[pyo3(
        signature = (**kwargs),
        text_signature = "(self, vocab_size=8000, show_progress=True, special_tokens=[], shrinking_factor=0.75, unk_token=None, max_piece_length=16, n_sub_iterations=2)"
    )]
    pub fn new(kwargs: Option<Bound<'_, PyDict>>) -> PyResult<(Self, PyTrainer)> {
        let mut builder = tk::models::unigram::UnigramTrainer::builder();
        if let Some(kwargs) = kwargs {
            for (key, val) in kwargs {
                let key: String = key.extract()?;
                // Every arm evaluates to `&mut builder` so the match is a
                // single expression; unknown keys log and fall through.
                match key.as_ref() {
                    "vocab_size" => builder.vocab_size(val.extract()?),
                    "show_progress" => builder.show_progress(val.extract()?),
                    "n_sub_iterations" => builder.n_sub_iterations(val.extract()?),
                    "shrinking_factor" => builder.shrinking_factor(val.extract()?),
                    "unk_token" => builder.unk_token(val.extract()?),
                    "max_piece_length" => builder.max_piece_length(val.extract()?),
                    "seed_size" => builder.seed_size(val.extract()?),
                    "initial_alphabet" => {
                        let alphabet: Vec<String> = val.extract()?;
                        builder.initial_alphabet(
                            alphabet
                                .into_iter()
                                .filter_map(|s| s.chars().next())
                                .collect(),
                        )
                    }
                    "special_tokens" => builder.special_tokens(
                        val.downcast::<PyList>()?
                            .into_iter()
                            .map(|token| {
                                if let Ok(content) = token.extract::<String>() {
                                    Ok(PyAddedToken::from(content, Some(true)).get_token())
                                } else if let Ok(mut token) =
                                    token.extract::<PyRefMut<PyAddedToken>>()
                                {
                                    token.special = true;
                                    Ok(token.get_token())
                                } else {
                                    Err(exceptions::PyTypeError::new_err(
                                        "special_tokens must be a List[Union[str, AddedToken]]",
                                    ))
                                }
                            })
                            .collect::<PyResult<Vec<_>>>()?,
                    ),
                    _ => {
                        println!("Ignored unknown kwargs option {key}");
                        &mut builder
                    }
                };
            }
        }

        let trainer: tokenizers::models::unigram::UnigramTrainer =
            builder.build().map_err(|e| {
                exceptions::PyException::new_err(format!("Cannot build UnigramTrainer: {e}"))
            })?;
        Ok((PyUnigramTrainer {}, trainer.into()))
    }
}

/// Trainers Module
#[pymodule]
pub fn trainers(m: &Bound<'_, PyModule>) -> PyResult<()> {
    m.add_class::<PyTrainer>()?;
    m.add_class::<PyBpeTrainer>()?;
    m.add_class::<PyWordPieceTrainer>()?;
    m.add_class::<PyWordLevelTrainer>()?;
    m.add_class::<PyUnigramTrainer>()?;
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;
    use tk::models::bpe::trainer::BpeTrainer;

    #[test]
    fn get_subtype() {
        Python::with_gil(|py| {
            let py_trainer = PyTrainer::new(Arc::new(RwLock::new(BpeTrainer::default().into())));
            let py_bpe = py_trainer.get_as_subtype(py).unwrap();
            assert_eq!("BpeTrainer", py_bpe.bind(py).get_type().qualname().unwrap());
        })
    }
}
tokenizers/bindings/python/src/trainers.rs/0
{ "file_path": "tokenizers/bindings/python/src/trainers.rs", "repo_id": "tokenizers", "token_count": 17893 }
325
"""Tests for the Python bindings of tokenizers post-processors.

Covers BertProcessing, RobertaProcessing, ByteLevel, TemplateProcessing and
Sequence: construction, pickle round-trips, and the token/id/offset layout
each processor produces.
"""

import json
import pickle

import pytest

from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import ByteLevel as ByteLevelPreTokenizer
from tokenizers.processors import (
    BertProcessing,
    ByteLevel,
    PostProcessor,
    RobertaProcessing,
    Sequence,
    TemplateProcessing,
)

from ..utils import data_dir, roberta_files


class TestBertProcessing:
    """BertProcessing wraps sequences as [CLS] A [SEP] (and appends B [SEP] for pairs)."""

    def test_instantiate(self):
        processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))
        assert processor is not None
        assert isinstance(processor, PostProcessor)
        assert isinstance(processor, BertProcessing)
        assert isinstance(
            pickle.loads(pickle.dumps(BertProcessing(("[SEP]", 0), ("[CLS]", 1)))),
            BertProcessing,
        )

    def test_processing(self):
        tokenizer = Tokenizer(BPE())
        tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
        tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
        tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))

        output = tokenizer.encode("my name", "pair")
        assert output.tokens == ["[CLS]", "my", "name", "[SEP]", "pair", "[SEP]"]
        assert output.ids == [1, 2, 3, 0, 6, 0]


class TestRobertaProcessing:
    """RobertaProcessing wraps sequences as <s> A </s> (pairs get a double </s> </s> separator)."""

    def test_instantiate(self):
        processor = RobertaProcessing(("</s>", 1), ("<s>", 0))
        assert processor is not None
        assert isinstance(processor, PostProcessor)
        assert isinstance(processor, RobertaProcessing)
        assert isinstance(
            pickle.loads(pickle.dumps(RobertaProcessing(("</s>", 1), ("<s>", 0)))),
            RobertaProcessing,
        )

    def test_processing(self):
        tokenizer = Tokenizer(BPE())
        tokenizer.add_special_tokens(["<s>", "</s>"])
        tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
        tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0))

        output = tokenizer.encode("my name", "pair")
        assert output.tokens == ["<s>", "my", "name", "</s>", "</s>", "pair", "</s>"]
        assert output.ids == [0, 2, 3, 1, 1, 6, 1]


class TestByteLevelProcessing:
    """ByteLevel post-processor: optionally trims the leading-space byte from offsets."""

    def test_instantiate(self):
        assert ByteLevel() is not None
        assert ByteLevel(trim_offsets=True) is not None
        assert isinstance(ByteLevel(), PostProcessor)
        assert isinstance(ByteLevel(), ByteLevel)
        assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel)

    def test_processing(self, roberta_files):
        # Deprecated in 0.9
        with pytest.deprecated_call():
            tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"]))
        tokenizer.pre_tokenizer = ByteLevelPreTokenizer(add_prefix_space=True)

        # Keeps original offsets
        output = tokenizer.encode("My name is John")
        assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
        assert output.offsets == [(0, 2), (2, 7), (7, 10), (10, 15)]

        # Trims offsets when activated
        tokenizer.post_processor = ByteLevel(trim_offsets=True)
        output = tokenizer.encode("My name is John")
        assert output.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"]
        assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15)]

    def test_manual_reload(self):
        # Round-trip through __getstate__ -> kwargs reconstruction.
        byte_level = ByteLevel()
        state = json.loads(byte_level.__getstate__())
        reloaded = ByteLevel(**state)
        assert isinstance(reloaded, ByteLevel)


class TestTemplateProcessing:
    """TemplateProcessing should be able to reproduce Bert/Roberta processors exactly."""

    def get_bert(self):
        return TemplateProcessing(
            single=["[CLS]", "$0", "[SEP]"],
            pair=["[CLS]", "$A", "[SEP]", "$B:1", "[SEP]:1"],
            special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
        )

    def get_roberta(self):
        return TemplateProcessing(
            single="<s> $0 </s>",
            pair="<s> $A </s> </s> $B </s>",
            special_tokens=[("<s>", 0), ("</s>", 1)],
        )

    def get_t5_squad(self):
        # >>> from transformers import AutoTokenizer
        # >>> tok = AutoTokenizer.from_pretrained("t5-small")
        # >>> tok.tokenize("question: ")
        # ['▁question', ':']
        # >>> tok.tokenize("context: ")
        # ['▁context', ':']
        # >>> tok.encode("context: ")
        # [2625, 10]
        # >>> tok.encode("question: ")
        # [822, 10]
        # NOTE(review): the "tokens" entries below use a plain underscore rather
        # than the '▁' metasymbol shown above — confirm this is intentional.
        return TemplateProcessing(
            single=["$0"],
            pair=["Q", "$A", "C", "$B"],
            special_tokens=[
                {
                    "id": "Q",
                    "ids": [2625, 10],
                    "tokens": ["_question", ":"],
                },
                {
                    "id": "C",
                    "ids": [822, 10],
                    "tokens": ["_context", ":"],
                },
            ],
        )

    def test_instantiate(self):
        bert = self.get_bert()
        assert bert is not None
        assert isinstance(bert, PostProcessor)
        assert isinstance(bert, TemplateProcessing)
        assert isinstance(pickle.loads(pickle.dumps(bert)), TemplateProcessing)

        # It is absolutely legal to have tokens with spaces in the name:
        TemplateProcessing(
            single=["[ C L S ]", "Token with space"],
            special_tokens=[("[ C L S ]", 0), ("Token with space", 1)],
        )
        # Sequence identifiers must be well formed:
        with pytest.raises(Exception, match="Cannot build Piece"):
            TemplateProcessing(single="[CLS] $$ [SEP]")
        with pytest.raises(Exception, match="Cannot build Piece"):
            TemplateProcessing(single="[CLS] $A: [SEP]")
        # Special tokens must be provided when used in template:
        with pytest.raises(Exception, match="Missing SpecialToken\\(s\\) with id\\(s\\)"):
            TemplateProcessing(single=["[CLS]"])

    def test_bert_parity(self):
        tokenizer = Tokenizer(BPE())
        tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
        tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
        tokenizer.post_processor = BertProcessing(("[SEP]", 0), ("[CLS]", 1))

        original = tokenizer.encode("my name", "pair")

        tokenizer.post_processor = self.get_bert()
        template = tokenizer.encode("my name", "pair")
        assert original.ids == template.ids

    def test_roberta_parity(self):
        tokenizer = Tokenizer(BPE())
        tokenizer.add_special_tokens(["<s>", "</s>"])
        tokenizer.add_tokens(["my", "name", "is", "john", "pair"])
        tokenizer.post_processor = RobertaProcessing(("</s>", 1), ("<s>", 0))

        original = tokenizer.encode("my name is john", "pair")
        tokenizer.post_processor = self.get_roberta()
        template = tokenizer.encode("my name is john", "pair")
        assert original.ids == template.ids


class TestSequenceProcessing:
    """Sequence chains processors; each stage's output feeds the next."""

    def test_sequence_processing(self):
        assert Sequence([]) is not None
        assert Sequence([ByteLevel()]) is not None
        assert isinstance(Sequence([]), PostProcessor)
        assert isinstance(Sequence([]), Sequence)
        serialized = pickle.dumps(Sequence([]))
        assert isinstance(pickle.loads(serialized), Sequence)

    def test_post_process(self):
        byte_level = ByteLevel(trim_offsets=True)
        template = TemplateProcessing(
            single=["[CLS]", "$0", "[SEP]"],
            pair=["[CLS]:0", "$A", "[SEP]:0", "$B:1", "[SEP]:1"],
            special_tokens=[("[CLS]", 1), ("[SEP]", 0)],
        )

        tokenizer = Tokenizer(BPE())
        tokenizer.add_special_tokens(["[SEP]", "[CLS]"])
        tokenizer.add_tokens(["my", "name", "is", "Ġjohn", "pair"])
        tokenizer.post_processor = template

        # Before the sequence
        original = tokenizer.encode("my name is Ġjohn")
        assert original.ids == [1, 2, 3, 4, 5, 0]
        assert original.type_ids == [0, 0, 0, 0, 0, 0]
        assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0)]
        pair = tokenizer.encode("my name is Ġjohn", "pair")
        # assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0]
        assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
        assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (11, 16), (0, 0), (0, 4), (0, 0)]

        processor = Sequence([byte_level, template])
        tokenizer.post_processor = processor

        original = tokenizer.encode("my name is Ġjohn")
        assert original.ids == [1, 2, 3, 4, 5, 0]
        assert original.type_ids == [0, 0, 0, 0, 0, 0]
        # Offsets ARE trimmed
        assert original.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0)]
        pair = tokenizer.encode("my name is Ġjohn", "pair")
        # assert pair.ids == [1, 2, 3, 4, 5, 0, 6, 0]
        assert pair.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]
        assert pair.offsets == [(0, 0), (0, 2), (3, 7), (8, 10), (12, 16), (0, 0), (0, 4), (0, 0)]

    def test_items(self):
        # Sequence supports indexed get/set and mutation of element attributes.
        processors = Sequence([RobertaProcessing(("</s>", 1), ("<s>", 0)), ByteLevel()])
        assert processors[0].__class__ == RobertaProcessing
        assert processors[1].__class__ == ByteLevel
        processors[0] = ByteLevel(add_prefix_space=False, trim_offsets=False, use_regex=False)
        print(processors[0])
        processors[0].add_prefix_space = True
        processors[0].trim_offsets = True
        processors[0].use_regex = True
        print(processors[0])
        assert processors[0].__class__ == ByteLevel
        assert processors[0].add_prefix_space
        assert processors[0].trim_offsets
        assert processors[0].use_regex
tokenizers/bindings/python/tests/bindings/test_processors.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_processors.py", "repo_id": "tokenizers", "token_count": 4406 }
326
## Requirements

In order to generate the documentation, it is necessary to have a Python environment with the following:

```bash
pip install sphinx sphinx_rtd_theme setuptools_rust
```

It is also necessary to have the `tokenizers` library in this same environment, for Sphinx to generate all the API Reference and links properly. If you want to visualize the documentation with some modifications made to the Python bindings, make sure you build it from source.

## Building the documentation

Once everything is set up, you can build the documentation automatically for all the languages using the following command in the `/docs` folder:

```bash
make html_all
```

If you want to build only for a specific language, you can use:

```bash
make html O="-t python"
```

(Replacing `python` with the target language among `rust`, `node`, and `python`)

**NOTE**

If you are making any structural change to the documentation, it is recommended to clean the build directory before rebuilding:

```bash
make clean && make html_all
```
tokenizers/docs/README.md/0
{ "file_path": "tokenizers/docs/README.md", "repo_id": "tokenizers", "token_count": 266 }
327
.. only:: python .. include:: python.inc .. only:: rust .. include:: rust.inc .. only:: node .. include:: node.inc
tokenizers/docs/source/api/reference.rst/0
{ "file_path": "tokenizers/docs/source/api/reference.rst", "repo_id": "tokenizers", "token_count": 47 }
328
# Build/test/benchmark orchestration for the tokenizers Rust crate.
# Test and bench fixtures are downloaded on demand into $(DATA_DIR).
DATA_DIR = data
BENCHMARK_DIR = benches
TESTS_DIR = tests

# Create the parent directory of the current target.
dir_guard=@mkdir -p $(@D)

# Fixtures needed by both tests and benchmarks.
SHARED_RESOURCES = $(DATA_DIR)/gpt2-vocab.json $(DATA_DIR)/gpt2-merges.txt $(DATA_DIR)/bert-base-uncased-vocab.txt $(DATA_DIR)/big.txt $(DATA_DIR)/small.txt $(DATA_DIR)/albert-base-v1-tokenizer.json $(DATA_DIR)/llama-3-tokenizer.json
BENCHMARK_RESOURCES = $(SHARED_RESOURCES)
TESTS_RESOURCES = $(SHARED_RESOURCES) $(DATA_DIR)/unigram.json $(DATA_DIR)/unigram_wagahaiwa_nekodearu.txt $(DATA_DIR)/roberta.json $(DATA_DIR)/tokenizer-wiki.json $(DATA_DIR)/bert-wiki.json

.PHONY : build
build :
	cargo build --all-targets

.PHONY : release
release :
	cargo build --release

.PHONY : format
format :
	cargo fmt --

.PHONY : lint
lint :
	cargo fmt -- --check
	cargo fmt -- $(BENCHMARK_DIR)/*.rs --check
	cargo clippy --all-targets --all-features -- -D warnings

.PHONY : test
test : $(TESTS_RESOURCES)
	cargo test

.PHONY : doc
doc :
	cargo doc

.PHONY : publish
publish :
	cargo publish

.PHONY : all-checks
all-checks : lint test doc

.PHONY : bench
bench : $(BENCHMARK_RESOURCES)
	cargo bench -- --verbose

# Pattern rules: fetch fixture files by name.
$(DATA_DIR)/gpt2-% :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-$* -O $@

$(DATA_DIR)/bert-% :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-$* -O $@

$(DATA_DIR)/unigram% :
	$(dir_guard)
	wget https://huggingface.co/Narsil/small/raw/main/unigram$* -O $@

$(DATA_DIR)/albert-base-v1-tokenizer.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-tokenizer.json -O $@

# NOTE(review): this target is not referenced by any resource list above —
# SHARED_RESOURCES uses llama-3-tokenizer.json (rule below). Possibly dead; confirm.
$(DATA_DIR)/tokenizer-llama3.json :
	$(dir_guard)
	wget https://huggingface.co/Narsil/llama-tokenizer/resolve/main/tokenizer.json -O $@

$(DATA_DIR)/big.txt :
	$(dir_guard)
	wget https://norvig.com/big.txt -O $@

# small.txt is derived from big.txt (first 100 lines).
$(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt
	head -100 $(DATA_DIR)/big.txt > $@

$(DATA_DIR)/roberta.json :
	$(dir_guard)
	wget https://huggingface.co/Narsil/small/raw/main/roberta.json -O $@

$(DATA_DIR)/tokenizer-wiki.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json -O $@

$(DATA_DIR)/bert-wiki.json :
	$(dir_guard)
	wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json -O $@

$(DATA_DIR)/llama-3-tokenizer.json :
	$(dir_guard)
	wget https://huggingface.co/hf-internal-testing/llama3-tokenizer/resolve/main/tokenizer.json -O $@
tokenizers/tokenizers/Makefile/0
{ "file_path": "tokenizers/tokenizers/Makefile", "repo_id": "tokenizers", "token_count": 1080 }
329
//! Test suite for the Web and headless browsers.

#![cfg(target_arch = "wasm32")]

extern crate wasm_bindgen_test;
use wasm_bindgen_test::*;

// Run these tests inside a browser environment rather than Node.
wasm_bindgen_test_configure!(run_in_browser);

/// Smoke test: verifies the wasm test harness itself executes.
#[wasm_bindgen_test]
fn pass() {
    let sum = 1 + 1;
    assert_eq!(sum, 2);
}
tokenizers/tokenizers/examples/unstable_wasm/tests/web.rs/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/tests/web.rs", "repo_id": "tokenizers", "token_count": 109 }
330
use super::model::Unigram; use serde::{ de::{Error, MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; impl Serialize for Unigram { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut model = serializer.serialize_struct("Unigram", 3)?; model.serialize_field("type", "Unigram")?; model.serialize_field("unk_id", &self.unk_id)?; model.serialize_field("vocab", &self.vocab)?; model.serialize_field("byte_fallback", &self.byte_fallback())?; model.end() } } impl<'de> Deserialize<'de> for Unigram { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( "Unigram", &["type", "vocab", "unk_id", "byte_fallback"], UnigramVisitor, ) } } struct UnigramVisitor; impl<'de> Visitor<'de> for UnigramVisitor { type Value = Unigram; fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "struct Unigram") } fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error> where V: MapAccess<'de>, { let mut vocab: Option<Vec<(String, f64)>> = None; let mut unk_id: Option<usize> = None; let mut byte_fallback: bool = false; while let Some(key) = map.next_key::<String>()? { match key.as_ref() { "unk_id" => { unk_id = map.next_value()?; } "byte_fallback" => byte_fallback = map.next_value()?, "vocab" => vocab = Some(map.next_value()?), "type" => match map.next_value()? 
{ "Unigram" => {} u => { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(u), &"Unigram", )) } }, _ => (), } } match (vocab, unk_id, byte_fallback) { (Some(vocab), unk_id, byte_fallback) => Ok(Unigram::from(vocab, unk_id, byte_fallback) .map_err(|err| Error::custom(format!("Unable to load vocab {err:?}")))?), (None, _, _) => Err(Error::custom("Missing vocab")), } } } #[cfg(test)] mod test { use super::*; #[test] fn test_serialization() { let vocab = vec![("<unk>".to_string(), 0.0), ("a".to_string(), -0.5)]; let model = Unigram::from(vocab, Some(0), false).unwrap(); let data = serde_json::to_string(&model).unwrap(); let reconstructed = serde_json::from_str(&data).unwrap(); assert_eq!(model, reconstructed); } #[test] fn test_serialization_unk_id_not_zero() { let vocab = vec![("a".to_string(), -0.5), ("<unk>".to_string(), 0.0)]; let model = Unigram::from(vocab, Some(1), false).unwrap(); let data = serde_json::to_string(&model).unwrap(); let reconstructed = serde_json::from_str(&data).unwrap(); assert_eq!(model, reconstructed); } #[test] fn test_serialization_no_unk_id() { let vocab = vec![("a".to_string(), -0.5)]; let model = Unigram::from(vocab, None, false).unwrap(); let data = serde_json::to_string(&model).unwrap(); let reconstructed = serde_json::from_str(&data).unwrap(); assert_eq!(model, reconstructed); } }
tokenizers/tokenizers/src/models/unigram/serialization.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/serialization.rs", "repo_id": "tokenizers", "token_count": 1824 }
331
//! Unicode normalization normalizers: the four standard normalization forms
//! (NFD/NFKD/NFC/NFKC) plus the NMT whitespace/control cleanup.

use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::macro_rules_attribute;

/// Canonical decomposition (Normalization Form D).
#[derive(Default, Copy, Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct NFD;
impl Normalizer for NFD {
    fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
        normalized.nfd();
        Ok(())
    }
}

/// Compatibility decomposition (Normalization Form KD).
#[derive(Default, Copy, Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct NFKD;
impl Normalizer for NFKD {
    fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
        normalized.nfkd();
        Ok(())
    }
}

/// Canonical decomposition followed by canonical composition (Form C).
#[derive(Default, Copy, Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct NFC;
impl Normalizer for NFC {
    fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
        normalized.nfc();
        Ok(())
    }
}

/// Compatibility decomposition followed by canonical composition (Form KC).
#[derive(Default, Copy, Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct NFKC;
impl Normalizer for NFKC {
    fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
        normalized.nfkc();
        Ok(())
    }
}

/// Drop a set of control characters, then map various whitespace-like code
/// points to a plain ASCII space.
fn do_nmt(normalized: &mut NormalizedString) {
    // Ascii Control characters
    // NOTE(review): only 0x008F/0x009F of the C1 range are filtered here —
    // presumably matching an upstream reference implementation; confirm.
    normalized
        .filter(|c| {
            !matches!(
                c as u32,
                0x0001..=0x0008 | 0x000B | 0x000E..=0x001F | 0x007F | 0x008F | 0x009F
            )
        })
        // Other code points considered as whitespace.
        .map(|c| match c as u32 {
            0x0009 => ' ',
            0x000A => ' ',
            0x000C => ' ',
            0x000D => ' ',
            0x1680 => ' ',
            0x200B..=0x200F => ' ',
            0x2028 => ' ',
            0x2029 => ' ',
            0x2581 => ' ',
            0xFEFF => ' ',
            0xFFFD => ' ',
            _ => c,
        });
}

/// NMT normalizer: the control-char/whitespace cleanup above.
#[derive(Default, Copy, Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Nmt;
impl Normalizer for Nmt {
    fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
        do_nmt(normalized);
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_nfkc() {
        // U+FB01 (ligature fi) decomposes to "fi" under NFKC; both output
        // chars map back to the original 3-byte span.
        let original = "\u{fb01}".to_string();
        let normalized = "fi".to_string();
        let mut n = NormalizedString::from(original.clone());
        NFKC.normalize(&mut n).unwrap();

        assert_eq!(
            n,
            NormalizedString::new(original, normalized, vec![(0, 3), (0, 3)], 0)
        );

        assert_eq!(n.alignments_original(), vec![(0, 2), (0, 2), (0, 2)]);
    }
}
tokenizers/tokenizers/src/normalizers/unicode.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/unicode.rs", "repo_id": "tokenizers", "token_count": 1317 }
332
//! BERT post-processor: wraps a single sequence as `[CLS] A [SEP]` and a pair
//! as `[CLS] A [SEP] B [SEP]` (type ids 0 for A, 1 for B).

use crate::tokenizer::{Encoding, PostProcessor, Result};
use ahash::AHashMap;
use serde::{Deserialize, Serialize};
use std::iter::FromIterator;

/// Holds the (token, id) pairs for the separator and classification tokens.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
#[serde(tag = "type")]
pub struct BertProcessing {
    pub sep: (String, u32),
    pub cls: (String, u32),
}

impl Default for BertProcessing {
    fn default() -> Self {
        Self {
            sep: ("[SEP]".into(), 102),
            cls: ("[CLS]".into(), 101),
        }
    }
}

impl BertProcessing {
    pub fn new(sep: (String, u32), cls: (String, u32)) -> Self {
        Self { sep, cls }
    }

    /// Clone of the (token, id) pair used as separator.
    pub fn get_sep_copy(&self) -> (String, u32) {
        (self.sep.0.clone(), self.sep.1)
    }

    /// Clone of the (token, id) pair used as classification prefix.
    pub fn get_cls_copy(&self) -> (String, u32) {
        (self.cls.0.clone(), self.cls.1)
    }
}

#[derive(thiserror::Error, Debug)]
pub enum BertProcessorError {
    #[error("encodings vector length must be either 1 or 2")]
    InvalidEncodingsVecLength,
}

impl PostProcessor for BertProcessing {
    /// Number of special tokens added: [CLS] + [SEP] for a single sequence,
    /// plus one more [SEP] for a pair.
    fn added_tokens(&self, is_pair: bool) -> usize {
        if is_pair {
            3
        } else {
            2
        }
    }

    /// Wrap each encoding (index 0 = first sequence, others = pair) with the
    /// special tokens. The same transformation is applied to each encoding's
    /// overflowing encodings (code is duplicated inside the closures because
    /// overflowing entries carry no further overflow of their own).
    fn process_encodings(
        &self,
        mut encodings: Vec<Encoding>,
        add_special_tokens: bool,
    ) -> Result<Vec<Encoding>> {
        if !add_special_tokens {
            return Ok(encodings);
        }

        let encodings: Vec<Encoding> = encodings
            .iter_mut()
            .enumerate()
            .map(|(i, encoding)| {
                if i == 0 {
                    // First sequence: [CLS] ... [SEP], type ids all 0.
                    let ids = [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat();
                    let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat();
                    let tokens = [
                        std::slice::from_ref(&self.cls.0),
                        encoding.get_tokens(),
                        std::slice::from_ref(&self.sep.0),
                    ]
                    .concat();
                    let words = [&[None], encoding.get_word_ids(), &[None]].concat();
                    let offsets = [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat();
                    let special_tokens =
                        [&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]].concat();
                    let attention_mask = vec![1; ids.len()];

                    // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain
                    // the special tokens.
                    let sequence_ranges = AHashMap::from_iter(vec![(0, 1..ids.len() - 1)]);
                    Encoding::new(
                        ids,
                        type_ids,
                        tokens,
                        words,
                        offsets,
                        special_tokens,
                        attention_mask,
                        encoding
                            .take_overflowing()
                            .into_iter()
                            .map(|encoding| {
                                let ids =
                                    [&[self.cls.1], encoding.get_ids(), &[self.sep.1]].concat();
                                let type_ids = [&[0], encoding.get_type_ids(), &[0]].concat();
                                let tokens = [
                                    std::slice::from_ref(&self.cls.0),
                                    encoding.get_tokens(),
                                    std::slice::from_ref(&self.sep.0),
                                ]
                                .concat();
                                let words =
                                    [&[None], encoding.get_word_ids(), &[None]].concat();
                                let offsets =
                                    [&[(0, 0)], encoding.get_offsets(), &[(0, 0)]].concat();
                                let special_tokens =
                                    [&[1u32], &vec![0; encoding.get_ids().len()][..], &[1]]
                                        .concat();
                                let attention_mask = vec![1; ids.len()];

                                // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't
                                // contain the special tokens.
                                let sequence_ranges =
                                    AHashMap::from_iter(vec![(0, 1..ids.len() - 1)]);
                                Encoding::new(
                                    ids,
                                    type_ids,
                                    tokens,
                                    words,
                                    offsets,
                                    special_tokens,
                                    attention_mask,
                                    vec![],
                                    sequence_ranges,
                                )
                            })
                            .collect(),
                        sequence_ranges,
                    )
                } else {
                    // Pair sequence: ... [SEP], type ids all 1 (no leading [CLS]).
                    let pair_ids = [encoding.get_ids(), &[self.sep.1]].concat();
                    let pair_type_ids = [encoding.get_type_ids(), &[1]].concat();
                    let pair_tokens =
                        [encoding.get_tokens(), std::slice::from_ref(&self.sep.0)].concat();
                    let pair_words = [encoding.get_word_ids(), &[None]].concat();
                    let pair_offsets = [encoding.get_offsets(), &[(0, 0)]].concat();
                    let pair_special_tokens =
                        [&vec![0u32; encoding.get_type_ids().len()][..], &[1]].concat();
                    let pair_attention_mask = vec![1; pair_ids.len()];

                    // For compatibility with `TemplateProcessing`, the sequence_ranges shouldn't contain
                    // the special tokens.
                    let pair_sequence_ranges =
                        AHashMap::from_iter(vec![(1, 0..pair_ids.len() - 1)]);
                    Encoding::new(
                        pair_ids,
                        pair_type_ids,
                        pair_tokens,
                        pair_words,
                        pair_offsets,
                        pair_special_tokens,
                        pair_attention_mask,
                        encoding
                            .take_overflowing()
                            .into_iter()
                            .map(|encoding| {
                                let pair_ids = [encoding.get_ids(), &[self.sep.1]].concat();
                                let pair_type_ids = [encoding.get_type_ids(), &[1]].concat();
                                let pair_tokens =
                                    [encoding.get_tokens(), std::slice::from_ref(&self.sep.0)]
                                        .concat();
                                let pair_words = [encoding.get_word_ids(), &[None]].concat();
                                let pair_offsets = [encoding.get_offsets(), &[(0, 0)]].concat();
                                let pair_special_tokens =
                                    [&vec![0u32; encoding.get_type_ids().len()][..], &[1]]
                                        .concat();
                                let pair_attention_mask = vec![1; pair_ids.len()];

                                // For compatibility with `TemplateProcessing`, the sequence_ranges
                                // shouldn't contain the special tokens.
                                let pair_sequence_ranges =
                                    AHashMap::from_iter(vec![(1, 0..pair_ids.len() - 1)]);
                                Encoding::new(
                                    pair_ids,
                                    pair_type_ids,
                                    pair_tokens,
                                    pair_words,
                                    pair_offsets,
                                    pair_special_tokens,
                                    pair_attention_mask,
                                    vec![],
                                    pair_sequence_ranges,
                                )
                            })
                            .collect(),
                        pair_sequence_ranges,
                    )
                }
            })
            .collect();

        Ok(encodings)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn serde() {
        let bert = BertProcessing::default();
        let bert_r = r#"{"type":"BertProcessing","sep":["[SEP]",102],"cls":["[CLS]",101]}"#;
        assert_eq!(serde_json::to_string(&bert).unwrap(), bert_r);
        assert_eq!(
            serde_json::from_str::<BertProcessing>(bert_r).unwrap(),
            bert
        );
    }

    #[test]
    fn bert_processing() {
        let processor = BertProcessing::default();
        assert_eq!(processor.added_tokens(false), 2);
        assert_eq!(processor.added_tokens(true), 3);

        use crate::Token;
        let encoding = Encoding::from_tokens(
            vec![
                Token::new(12, "Hello".into(), (0, 5)),
                Token::new(14, "there".into(), (6, 11)),
            ],
            0,
        );
        let pair = Encoding::from_tokens(vec![Token::new(15, "pair".into(), (0, 4))], 0);
        let single_encoding = processor.process(encoding.clone(), None, true).unwrap();
        assert_eq!(
            single_encoding,
            Encoding::new(
                vec![101, 12, 14, 102],
                vec![0, 0, 0, 0],
                vec![
                    "[CLS]".into(),
                    "Hello".into(),
                    "there".into(),
                    "[SEP]".into()
                ],
                vec![None, None, None, None],
                vec![(0, 0), (0, 5), (6, 11), (0, 0)],
                vec![1, 0, 0, 1],
                vec![1, 1, 1, 1],
                vec![],
                AHashMap::from_iter(vec![(0, 1..3)]),
            )
        );
        assert_eq!(single_encoding.token_to_sequence(2), Some(0));
        assert_eq!(single_encoding.token_to_sequence(3), None);
        let pair_encoding = processor
            .process(encoding.clone(), Some(pair.clone()), true)
            .unwrap();
        assert_eq!(
            pair_encoding,
            Encoding::new(
                vec![101, 12, 14, 102, 15, 102],
                vec![0, 0, 0, 0, 1, 1],
                vec![
                    "[CLS]".into(),
                    "Hello".into(),
                    "there".into(),
                    "[SEP]".into(),
                    "pair".into(),
                    "[SEP]".into()
                ],
                vec![None, None, None, None, None, None],
                vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 4), (0, 0)],
                vec![1, 0, 0, 1, 0, 1],
                vec![1, 1, 1, 1, 1, 1],
                vec![],
                AHashMap::from_iter(vec![(0, 1..3), (1, 4..5)]),
            )
        );
        assert_eq!(pair_encoding.token_to_sequence(2), Some(0));
        assert_eq!(pair_encoding.token_to_sequence(3), None);
        assert_eq!(pair_encoding.token_to_sequence(4), Some(1));
        assert_eq!(pair_encoding.token_to_sequence(5), None);

        // No special tokens
        let pair_encoding = processor.process(encoding, Some(pair), false).unwrap();
        assert_eq!(
            pair_encoding,
            Encoding::new(
                vec![12, 14, 15],
                vec![0, 0, 1],
                vec!["Hello".into(), "there".into(), "pair".into(),],
                vec![None, None, None],
                vec![(0, 5), (6, 11), (0, 4)],
                vec![0, 0, 0],
                vec![1, 1, 1],
                vec![],
                AHashMap::from_iter(vec![(0, 0..2), (1, 2..3)]),
            )
        );
        assert_eq!(pair_encoding.token_to_sequence(0), Some(0));
        assert_eq!(pair_encoding.token_to_sequence(1), Some(0));
        assert_eq!(pair_encoding.token_to_sequence(2), Some(1));
    }
}
tokenizers/tokenizers/src/processors/bert.rs/0
{ "file_path": "tokenizers/tokenizers/src/processors/bert.rs", "repo_id": "tokenizers", "token_count": 7594 }
333
pub(crate) mod cache;
#[cfg(feature = "http")]
pub(crate) mod from_pretrained;

// Regex backend selection: `onig` wins when both features are on;
// `fancy-regex` is the fallback. At least one must be enabled.
#[cfg(all(feature = "fancy-regex", not(feature = "onig")))]
mod fancy;
#[cfg(all(feature = "fancy-regex", not(feature = "onig")))]
pub use fancy::SysRegex;
#[cfg(feature = "onig")]
mod onig;
#[cfg(feature = "onig")]
pub use crate::utils::onig::SysRegex;

#[cfg(not(any(feature = "onig", feature = "fancy-regex")))]
compile_error!("One of the `onig`, or `fancy-regex` features must be enabled");

pub mod iter;
pub mod padding;
pub mod parallelism;
pub(crate) mod progress;
pub mod truncation;

use ahash::AHashMap;
use serde::{Serialize, Serializer};
use std::collections::BTreeMap;

/// Serialize an `AHashMap` with deterministic (sorted-by-key) entry order,
/// by copying the entries into a `BTreeMap` first. Intended for use with
/// `#[serde(serialize_with = "ordered_map")]`.
pub(crate) fn ordered_map<S, K, V>(
    value: &AHashMap<K, V>,
    serializer: S,
) -> std::result::Result<S::Ok, S::Error>
where
    S: Serializer,
    K: Serialize + std::cmp::Ord,
    V: Serialize,
{
    let ordered: BTreeMap<_, _> = value.iter().collect();
    ordered.serialize(serializer)
}

// Generates a `From<$from_ty>` impl that wraps the value into the given
// enum variant.
macro_rules! impl_enum_from (
    ($from_ty:ty, $enum:ty, $variant:ident) => {
        impl From<$from_ty> for $enum {
            fn from(from: $from_ty) -> Self {
                <$enum>::$variant(from)
            }
        }
    }
);

/// Implement `serde::{Serialize, Serializer}` with `#[serde(tag = "type")]` attribute for a given struct.
/// Panic when a json string being deserialized misses field `type`.
///
/// # Examples
///
/// ```
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
///    impl_serde_type!{
///        #[derive(Debug)]
///        struct Point {
///            x: i32,
///            #[serde(default = "default_y")]
///            y: i32,
///        }
///    }
///    fn default_y() -> i32 {
///        5
///    }
///
///    let point = Point { x: 1, y: 2 };
///    let serialized_s = r#"{"type":"Point","x":1,"y":2}"#;
///    assert_eq!(serde_json::to_string(&point).unwrap(), serialized_s);
/// }
/// ```
///
/// ```should_panic
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
///    impl_serde_type!{
///        #[derive(Debug)]
///        struct Point1D {
///            x: i32,
///        }
///    }
///
///    let serialized_s = r#"{"x":1}"#;
///    let deserialized: Point1D = serde_json::from_str(serialized_s).unwrap();
/// }
/// ```
///
/// # Examples (unit structs)
///
/// ```
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
///    impl_serde_type!{
///        struct Unit;
///    }
///
///    let unit = Unit;
///    let serialized_s = r#"{"type":"Unit"}"#;
///    assert_eq!(serde_json::to_string(&unit).unwrap(), serialized_s);
/// }
/// ```
///
/// ```should_panic
/// # #[macro_use] extern crate tokenizers;
/// use serde::{Serialize, Deserialize};
///
/// fn main() {
///    impl_serde_type!{
///        struct Unit;
///    }
///
///    let serialized_s = r#"{"some_field":1}"#;
///    let deserialized: Unit = serde_json::from_str(serialized_s).unwrap();
/// }
/// ```
#[macro_export]
macro_rules! impl_serde_type{
    // Arm 1: struct with named fields. Generates a hidden `*Def` mirror used
    // via `#[serde(remote)]` plus a `*Deserializer` wrapper that enforces the
    // presence of the `type` tag on deserialization.
    (
        $(#[$meta:meta])*
        $vis:vis struct $struct_name:ident {
            $(
                $(#[$field_meta:meta])*
                $field_vis:vis $field_name:ident : $field_type:ty
            ),*$(,)+
        }
    ) => {
        paste::paste!{
            $(#[$meta])*
            #[derive(Serialize, Deserialize)]
            #[serde(tag = "type", from = $struct_name "Deserializer")]
            $vis struct $struct_name{
                $(
                    $(#[$field_meta])*
                    $field_vis $field_name : $field_type,
                )*
            }

            #[doc(hidden)]
            $(#[$meta])*
            #[derive(Deserialize)]
            #[serde(tag = "type", remote = $struct_name "")]
            struct [<$struct_name Def>]{
                $(
                    $(#[$field_meta])*
                    $field_vis $field_name : $field_type,
                )*
            }

            // Single-variant enum: deserialization fails unless the tag value
            // is exactly the struct's name.
            #[doc(hidden)]
            #[derive(Deserialize)]
            enum [<$struct_name Type>] {
                $struct_name,
            }

            #[doc(hidden)]
            #[derive(Deserialize)]
            struct [<$struct_name Deserializer>] {
                #[allow(dead_code)]
                r#type: [<$struct_name Type>],
                #[serde(flatten, with = $struct_name "Def")]
                r#struct: $struct_name,
            }

            #[doc(hidden)]
            impl std::convert::From<[<$struct_name Deserializer>]> for $struct_name {
                fn from(v: [<$struct_name Deserializer>]) -> Self {
                    v.r#struct
                }
            }
        }
    };
    // Arm 2: unit struct. Serializes as `{"type": "Name"}` via a helper struct.
    (
        $(#[$meta:meta])*
        $vis:vis struct $struct_name:ident;
    ) => {
        paste::paste!{
            $(#[$meta])*
            $vis struct $struct_name;

            impl serde::Serialize for $struct_name {
                fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where
                    S: serde::ser::Serializer {
                    let helper = [<$struct_name Helper>]{r#type: [<$struct_name Type>]::$struct_name};
                    helper.serialize(serializer)
                }
            }

            impl<'de> serde::Deserialize<'de> for $struct_name {
                fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
                where
                    D: serde::Deserializer<'de>,
                {
                    let _helper = [<$struct_name Helper>]::deserialize(deserializer)?;
                    Ok($struct_name)
                }
            }

            #[derive(serde::Serialize, serde::Deserialize)]
            enum [<$struct_name Type>] {
                $struct_name,
            }

            #[derive(serde::Serialize, serde::Deserialize)]
            struct [<$struct_name Helper>] {
                #[allow(dead_code)]
                r#type: [<$struct_name Type>],
            }
        }
    }
}

// Re-export macro_rules_attribute
pub use macro_rules_attribute::macro_rules_attribute;
tokenizers/tokenizers/src/utils/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/mod.rs", "repo_id": "tokenizers", "token_count": 3161 }
334
// Based on [this tutorial](https://github.com/jsdoc2md/jsdoc-to-markdown/wiki/How-to-create-one-output-file-per-class). import fs from 'fs'; import path from 'path'; import url from 'url'; import jsdoc2md from 'jsdoc-to-markdown'; const docs = path.dirname(path.dirname(url.fileURLToPath(import.meta.url))); const root = path.dirname(docs); // jsdoc config file const conf = path.join(docs, 'jsdoc-conf.json'); // input and output paths const inputFile = path.join(root, '/src/**/*.js'); const outputDir = path.join(root, '/docs/source/api/'); // get template data const templateData = await jsdoc2md.getTemplateData({ files: inputFile, configure: conf }) // reduce templateData to an array of module names const moduleNames = templateData.reduce( (moduleNames, identifier) => { if (identifier.kind === 'module') { moduleNames.push(identifier.name) } return moduleNames }, [] ) // create a documentation file for each module for (const moduleName of moduleNames) { const template = `{{#module name="${moduleName}"}}{{>docs}}{{/module}}`; console.log(`rendering ${moduleName}, template: ${template}`); let output = await jsdoc2md.render({ 'data': templateData, 'template': template, 'heading-depth': 1, 'no-gfm': true, 'name-format': 'backticks', 'no-cache': true, 'separators': true, 'configure': conf, }); // Post-processing output = output.replace(/(^#+\s.+)/gm, '$1\n'); // Add new line after each header // Replace all generated marker names with ids (for linking), and add group class output = output.replace(/<a name="(\S+)"><\/a>/g, '<a id="$1" class="group"></a>'); // Unescape some of the characters which jsdoc2md escapes: // TODO: May need to extend this list output = output.replace(/\\([|_&*])/gm, '$1'); output = output.replaceAll('new exports.', 'new '); let outputPath = path.resolve(outputDir, `${moduleName}.md`); fs.mkdirSync(path.dirname(outputPath), { recursive: true }); fs.writeFileSync(outputPath, output); }
transformers.js/docs/scripts/generate.js/0
{ "file_path": "transformers.js/docs/scripts/generate.js", "repo_id": "transformers.js", "token_count": 790 }
335
# Installation <include> { "path": "../snippets/2_installation.snippet" } </include>
transformers.js/docs/source/installation.md/0
{ "file_path": "transformers.js/docs/source/installation.md", "repo_id": "transformers.js", "token_count": 38 }
336
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

// https://vitejs.dev/config/
// Minimal config: only the official React plugin (JSX transform + fast refresh).
export default defineConfig({
  plugins: [react()],
})
transformers.js/examples/cross-encoder/vite.config.js/0
{ "file_path": "transformers.js/examples/cross-encoder/vite.config.js", "repo_id": "transformers.js", "token_count": 54 }
337
# Transformers.js - Sample Electron application

An example project to show how to run 🤗 Transformers in an [Electron](https://www.electronjs.org/) application.

## Getting Started
1. Clone the repo and enter the project directory:
    ```bash
    git clone https://github.com/huggingface/transformers.js.git
    cd transformers.js/examples/electron/
    ```
1. Install the necessary dependencies:
    ```bash
    npm install
    ```
1. Run the application:
    ```bash
    npm run start
    ```

After a few seconds, a new window should pop up on your screen!

## Editing the template

All source code can be found in `./src/`:
- `index.js` - Serves as the entry point for the application's main process. When an Electron app is launched, this is the first file that gets executed, and it is responsible for setting up the main process of the application. You will need to restart the application after editing this file for your changes to take effect.
- `preload.js` - Used to preload scripts and modules in a renderer process before any other scripts run. In our case, we use the `contextBridge` API to expose the `run` function to the renderer, which runs the model in the background. You will need to restart the application after editing this file for your changes to take effect.
- `model.js` - Contains all the logic for loading the model and running predictions. You will need to restart the application after editing this file for your changes to take effect.
- `client.js` - Handles interaction with the interface, as well as communication between the renderer thread (UI) and the worker thread (processing). To see changes made to this file while editing, simply refresh the window (<kbd>Ctrl + R</kbd> or "View" &rarr; "Reload").
- `index.html`, `index.css` - The user interface which is displayed to the user. To see changes made to this file while editing, simply refresh the window (<kbd>Ctrl + R</kbd> or "View" &rarr; "Reload").
transformers.js/examples/electron/README.md/0
{ "file_path": "transformers.js/examples/electron/README.md", "repo_id": "transformers.js", "token_count": 528 }
338
// background.js - Handles requests from the UI, runs the model, then sends back a response

import { pipeline } from '@huggingface/transformers';

// Lazily-constructed singleton around the transformers.js pipeline so the
// model is only loaded once per service-worker lifetime.
class PipelineSingleton {
    static task = 'text-classification';
    static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
    static instance = null;

    // Returns the (cached) pipeline promise; `progress_callback` is only used
    // on the first call, when the pipeline is actually created.
    static async getInstance(progress_callback = null) {
        this.instance ??= pipeline(this.task, this.model, { progress_callback });
        return this.instance;
    }
}

// Create generic classify function, which will be reused for the different types of events.
const classify = async (text) => {
    // Get the pipeline instance. This will load and build the model when run for the first time.
    let model = await PipelineSingleton.getInstance((data) => {
        // You can track the progress of the pipeline creation here.
        // e.g., you can send `data` back to the UI to indicate a progress bar
        // console.log('progress', data)
    });

    // Actually run the model on the input text
    let result = await model(text);
    return result;
};

////////////////////// 1. Context Menus //////////////////////
//
// Add a listener to create the initial context menu items,
// context menu items only need to be created at runtime.onInstalled
chrome.runtime.onInstalled.addListener(function () {
    // Register a context menu item that will only show up for selection text.
    chrome.contextMenus.create({
        id: 'classify-selection',
        title: 'Classify "%s"',
        contexts: ['selection'],
    });
});

// Perform inference when the user clicks a context menu
chrome.contextMenus.onClicked.addListener(async (info, tab) => {
    // Ignore context menu clicks that are not for classifications (or when there is no input)
    if (info.menuItemId !== 'classify-selection' || !info.selectionText) return;

    // Perform classification on the selected text
    let result = await classify(info.selectionText);

    // Do something with the result
    chrome.scripting.executeScript({
        target: { tabId: tab.id },    // Run in the tab that the user clicked in
        args: [result],               // The arguments to pass to the function
        function: (result) => {       // The function to run
            // NOTE: This function is run in the context of the web page, meaning that `document` is available.
            console.log('result', result)
            console.log('document', document)
        },
    });
});
//////////////////////////////////////////////////////////////

////////////////////// 2. Message Events /////////////////////
//
// Listen for messages from the UI, process it, and send the result back.
chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
    console.log('sender', sender)
    if (message.action !== 'classify') return; // Ignore messages that are not meant for classification.

    // Run model prediction asynchronously
    (async function () {
        // Perform classification
        let result = await classify(message.text);

        // Send response back to UI
        sendResponse(result);
    })();

    // return true to indicate we will send a response asynchronously
    // see https://stackoverflow.com/a/46628145 for more information
    return true;
});
//////////////////////////////////////////////////////////////
transformers.js/examples/extension/src/background.js/0
{ "file_path": "transformers.js/examples/extension/src/background.js", "repo_id": "transformers.js", "token_count": 1038 }
339
/** @type {import('tailwindcss').Config} */ module.exports = { content: [ './src/pages/**/*.{js,ts,jsx,tsx,mdx}', './src/components/**/*.{js,ts,jsx,tsx,mdx}', './src/app/**/*.{js,ts,jsx,tsx,mdx}', ], theme: { extend: { backgroundImage: { 'gradient-radial': 'radial-gradient(var(--tw-gradient-stops))', 'gradient-conic': 'conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))', }, }, }, plugins: [], }
transformers.js/examples/next-client/tailwind.config.js/0
{ "file_path": "transformers.js/examples/next-client/tailwind.config.js", "repo_id": "transformers.js", "token_count": 236 }
340
export default function Progress({ text, percentage }) { percentage = percentage ?? 0; return ( <div className="progress-container"> <div className='progress-bar' style={{ 'width': `${percentage}%` }}>{text} ({`${percentage.toFixed(2)}%`})</div> </div> ); }
transformers.js/examples/react-translator/src/components/Progress.jsx/0
{ "file_path": "transformers.js/examples/react-translator/src/components/Progress.jsx", "repo_id": "transformers.js", "token_count": 99 }
341
{ "name": "segment-anything-client", "private": true, "version": "0.0.0", "type": "module", "scripts": { "dev": "vite", "build": "vite build", "preview": "vite preview" }, "dependencies": { "@huggingface/transformers": "^3.0.0-alpha.0" }, "devDependencies": { "vite": "^5.2.9" } }
transformers.js/examples/segment-anything-client/package.json/0
{ "file_path": "transformers.js/examples/segment-anything-client/package.json", "repo_id": "transformers.js", "token_count": 152 }
342
// Mapping from human-readable voice labels to CMU ARCTIC speaker-embedding ids.
export const SPEAKERS = {
    "US female 1": "cmu_us_slt_arctic-wav-arctic_a0001",
    "US female 2": "cmu_us_clb_arctic-wav-arctic_a0001",
    "US male 1": "cmu_us_bdl_arctic-wav-arctic_a0003",
    "US male 2": "cmu_us_rms_arctic-wav-arctic_a0003",
    "Canadian male": "cmu_us_jmk_arctic-wav-arctic_a0002",
    "Scottish male": "cmu_us_awb_arctic-wav-arctic_b0002",
    "Indian male": "cmu_us_ksp_arctic-wav-arctic_a0007",
}

// Default voice: the embedding id for "US female 1" above.
export const DEFAULT_SPEAKER = "cmu_us_slt_arctic-wav-arctic_a0001";
transformers.js/examples/text-to-speech-client/src/constants.js/0
{ "file_path": "transformers.js/examples/text-to-speech-client/src/constants.js", "repo_id": "transformers.js", "token_count": 247 }
343
import { Fragment } from 'react';

// Background colours cycled by token position so adjacent tokens are
// visually distinguishable.
const COLOURS = [
    'bg-purple-300',
    'bg-green-300',
    'bg-yellow-300',
    'bg-red-300',
    'bg-blue-300',
]

export function Token({ text, position, margin }) {
    // Render embedded newlines as <br /> elements (JSX ignores literal \n).
    const textWithLineBreaks = text.split('\n').map((line, index, array) => (
        <Fragment key={index}>
            {line}
            {index !== array.length - 1 && <br />}
        </Fragment>
    ));

    // Single-line tokens are inline-block so their highlight hugs the text;
    // multi-line tokens must stay inline to wrap across lines.
    return (<span
        style={{ marginLeft: margin }}
        className={`leading-5 ${textWithLineBreaks.length === 1 ? 'inline-block ' : ''}${COLOURS[position % COLOURS.length]}`}
    >
        {textWithLineBreaks}
    </span>)
}
transformers.js/examples/tokenizer-playground/src/components/Token.jsx/0
{ "file_path": "transformers.js/examples/tokenizer-playground/src/components/Token.jsx", "repo_id": "transformers.js", "token_count": 287 }
344
<!doctype html> <html lang="en"> <head> <meta charset="UTF-8" /> <link rel="icon" type="image/svg+xml" href="/vite.svg" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>Moondream WebGPU</title> </head> <body> <div id="root"></div> <script type="module" src="/src/main.jsx"></script> </body> </html>
transformers.js/examples/webgpu-vlm/index.html/0
{ "file_path": "transformers.js/examples/webgpu-vlm/index.html", "repo_id": "transformers.js", "token_count": 157 }
345
# MIT License # # Copyright (c) Microsoft Corporation, Hugging Face. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from typing import Optional import itertools import numpy as np import onnx import packaging.version as pv import warnings from onnx import helper, numpy_helper from onnx import onnx_pb as onnx_proto import onnxslim.third_party.onnx_graphsurgeon as gs FLOAT32 = 1 FLOAT16 = 10 def _npfloat16_to_int(np_list): """ Convert numpy float16 to python int. :param np_list: numpy float16 list :return int_list: python int list """ return [int(bin(_.view("H"))[2:].zfill(16), 2) for _ in np_list] def convert_np_to_float16(np_array, min_positive_val=1e-7, max_finite_val=1e4): """ Convert float32 numpy array to float16 without changing sign or finiteness. Positive values less than min_positive_val are mapped to min_positive_val. Positive finite values greater than max_finite_val are mapped to max_finite_val. Similar for negative values. 
NaN, 0, inf, and -inf are unchanged. """ def between(a, b, c): return np.logical_and(a < b, b < c) positive_values = np_array[np.where(np_array > 0)] if positive_values.shape[0] > 0: pos_max = positive_values.max() pos_min = positive_values.min() if pos_max >= max_finite_val: warnings.warn( "the float32 number {} will be truncated to {}".format( pos_max, max_finite_val ) ) if pos_min <= min_positive_val: warnings.warn( "the float32 number {} will be truncated to {}".format( pos_min, min_positive_val ) ) negative_values = np_array[np.where(np_array < 0)] if negative_values.shape[0] > 0: neg_max = negative_values.max() neg_min = negative_values.min() if neg_min <= -max_finite_val: warnings.warn( "the float32 number {} will be truncated to {}".format( neg_min, -max_finite_val ) ) if neg_max >= -min_positive_val: warnings.warn( "the float32 number {} will be truncated to {}".format( neg_max, -min_positive_val ) ) np_array = np.where( between(0, np_array, min_positive_val), min_positive_val, np_array ) np_array = np.where( between(-min_positive_val, np_array, 0), -min_positive_val, np_array ) np_array = np.where( between(max_finite_val, np_array, float("inf")), max_finite_val, np_array ) np_array = np.where( between(float("-inf"), np_array, -max_finite_val), -max_finite_val, np_array ) return np.float16(np_array) def convert_tensor_float_to_float16(tensor, min_positive_val=1e-7, max_finite_val=1e4): """ Convert tensor float to float16. 
:param tensor: TensorProto object :return tensor_float16: converted TensorProto object """ if not isinstance(tensor, onnx_proto.TensorProto): raise ValueError( "Expected input type is an ONNX TensorProto but got %s" % type(tensor) ) if tensor.data_type == onnx_proto.TensorProto.FLOAT: tensor.data_type = onnx_proto.TensorProto.FLOAT16 # convert float_data (float type) to float16 and write to int32_data if tensor.float_data: float16_data = convert_np_to_float16( np.array(tensor.float_data), min_positive_val, max_finite_val ) int_list = _npfloat16_to_int(float16_data) tensor.int32_data[:] = int_list tensor.float_data[:] = [] # convert raw_data (bytes type) if tensor.raw_data: # convert n.raw_data to float float32_list = np.fromstring(tensor.raw_data, dtype="float32") # convert float to float16 float16_list = convert_np_to_float16( float32_list, min_positive_val, max_finite_val ) # convert float16 to bytes and write back to raw_data tensor.raw_data = float16_list.tostring() return tensor def make_value_info_from_tensor(tensor): shape = numpy_helper.to_array(tensor).shape return helper.make_tensor_value_info(tensor.name, tensor.data_type, shape) DEFAULT_OP_BLOCK_LIST = [ "ArrayFeatureExtractor", "Binarizer", "CastMap", "CategoryMapper", "DictVectorizer", "FeatureVectorizer", "Imputer", "LabelEncoder", "LinearClassifier", "LinearRegressor", "Normalizer", "OneHotEncoder", "RandomUniformLike", "SVMClassifier", "SVMRegressor", "Scaler", "TreeEnsembleClassifier", "TreeEnsembleRegressor", "ZipMap", "NonMaxSuppression", "TopK", "RoiAlign", "Resize", # 'Range', "CumSum", "Min", "Max", "Upsample", # NEW: "RandomNormalLike", # TODO: Ideally, "Cast" nodes should not be here, for the following reasons: # - It breaks the semantics that the default list contains "ops that are not supported for float16 in ONNX Runtime". # - When fp32 casts already exist in the model (e.g., for rotary embeddings), this script will insert redundant casts around it. 
# However, without it, the graphs produced are invalid. Eventually, we will resolve this. "Cast", ] def initial_checking(model, disable_shape_infer): func_infer_shape = None if not disable_shape_infer and pv.Version(onnx.__version__) >= pv.Version("1.2"): try: from onnx.shape_inference import infer_shapes func_infer_shape = infer_shapes finally: pass if not isinstance(model, onnx_proto.ModelProto): raise ValueError( "Expected model type is an ONNX ModelProto but got %s" % type(model) ) if func_infer_shape is not None: model = func_infer_shape(model) is_fp16_ready_flag = check_if_fp16_ready(model.graph) return model, func_infer_shape, is_fp16_ready_flag def convert_float_to_float16( model, min_positive_val=1e-7, max_finite_val=1e4, keep_io_types=False, disable_shape_infer=False, op_block_list=None, node_block_list=None, check_fp16_ready=True, ): # create blocklists if op_block_list is None: op_block_list = DEFAULT_OP_BLOCK_LIST if node_block_list is None: node_block_list = [] op_block_list = set(op_block_list) node_block_list = set(node_block_list) global_input_name_dict = ( {} ) # key: input name, value: new output name after Cast node # basic checking, including shape inference model, func_infer_shape, is_fp16_ready_flag = initial_checking( model, disable_shape_infer ) if is_fp16_ready_flag and check_fp16_ready: raise ValueError( "The model is already converted to float16, if convert again, the model might be wrong. \n If you are sure to convert again, please set check_fp16_ready=False." 
) graph_stack = [model.graph] is_top_level = True while graph_stack: next_level = [] for curr_graph in graph_stack: process_graph_input( curr_graph, is_top_level, keep_io_types, global_input_name_dict ) value_info_block_list = process_tensor_in_node( curr_graph, op_block_list, node_block_list, min_positive_val, max_finite_val, ) process_value_info(curr_graph, value_info_block_list) process_node_in_block_list( curr_graph, global_input_name_dict, op_block_list, node_block_list ) process_initializers( curr_graph, op_block_list, node_block_list, min_positive_val, max_finite_val, ) process_graph_output(curr_graph, is_top_level, keep_io_types) sub_graph_list = get_next_level_graph( curr_graph, op_block_list, node_block_list ) if len(sub_graph_list) > 0: next_level.extend(sub_graph_list) if not is_top_level: process_node_input_output(curr_graph, global_input_name_dict) is_top_level = False # Going to process sub-graph graph_stack = next_level remove_unnecessary_cast_node(model.graph) # Topologically sort the graph # NOTE: We do not perform another round of optimization as the model is already optimized graph = gs.import_onnx(model) graph.toposort() model = gs.export_onnx(graph) return model # Change the input/output of the node to the new output name after Cast node for sub-graph # Because there have NO value_info start from def process_node_input_output( graph: onnx_proto.GraphProto, global_input_name_dict: dict ): for node in graph.node: for i, input_name in enumerate(node.input): if input_name in global_input_name_dict: node.input[i] = global_input_name_dict[input_name] for i, output_name in enumerate(node.output): if output_name in global_input_name_dict: node.output[i] = global_input_name_dict[output_name] def process_graph_input( graph: onnx_proto.GraphProto, is_top_level: bool, is_io_fp32: bool, global_input_name_dict: dict, ): # The input dtype is float32, need to cast to fp16 if is_top_level and is_io_fp32: for graph_input in graph.input: # n_input is 
ValueInfoProto if graph_input.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT: downstream_nodes = find_downstream_node_by_input_name( graph, graph_input.name ) for d_node in downstream_nodes: # More than one node may consume the model input, so we only create # a single cast node, and then reuse this node when needed. cast_exists = graph_input.name in global_input_name_dict if cast_exists: cast_node_output_name = global_input_name_dict[graph_input.name] else: cast_node_output_name = graph_input.name + "_fp16" add_cast_node( graph, [graph_input.name], [cast_node_output_name], cast_node_output_name, # Set node name same as output name FLOAT16, ) add_new_value_info( graph, graph_input, cast_node_output_name, onnx_proto.TensorProto.FLOAT16, ) for i, input_name in enumerate(d_node.input): if input_name == graph_input.name: d_node.input[i] = ( cast_node_output_name # Change the input of the second node ) global_input_name_dict[graph_input.name] = ( cast_node_output_name ) # For the sub-graph, don't do cast else: # Change the input dtype to fp16 without any cast for graph_input in graph.input: if graph_input.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT: graph_input.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16 def process_graph_output( graph: onnx_proto.GraphProto, is_top_level: bool, is_io_fp32: bool ): if is_top_level and is_io_fp32: # the output dtype is float32, need to cast to fp16 for i, graph_output in enumerate(graph.output): if graph_output.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT: new_producer_name = graph_output.name + "_fp16" original_name = graph_output.name # The correct output name # Get the node(s) that produce the model output # These will most likely be fp16, but could be fp32 if the previous node is in block_list upstream_nodes = find_upstream_node_by_output_name(graph, original_name) assert len(upstream_nodes) == 1 # Should be only one node producer_node = upstream_nodes[0] for i, output_name in 
enumerate(producer_node.output): if output_name == original_name: producer_node.output[i] = new_producer_name cast_node_name = new_producer_name + "_input_cast" + str(i) add_cast_node( graph, [new_producer_name], [original_name], cast_node_name, onnx_proto.TensorProto.FLOAT, ) for value_info in graph.value_info: if original_name == value_info.name: value_info.type.tensor_type.elem_type = ( onnx_proto.TensorProto.FLOAT ) # Get the node(s) that consume the model output downstream_nodes = find_downstream_node_by_input_name( graph, original_name, include_subgraphs=False, ) # It is possible that the producer node is also input to downstream nodes # So, we update the inputs of these downstream nodes for d_node in downstream_nodes: for i, input_name in enumerate(d_node.input): if input_name == original_name: d_node.input[i] = new_producer_name else: # change the output dtype to fp16 in tensor for graph_output in graph.output: if graph_output.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT: graph_output.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16 def process_node_in_block_list( graph: onnx_proto.GraphProto, global_input_name_dict: dict, op_block_list: list, node_block_list: list, ): # NB: Important to create a copy of the nodes in the graph to avoid modifying # the graph in-place while iterating (causing an infinite loop) for node in list(graph.node): if (node.op_type in op_block_list) or (node.name in node_block_list): insert_cast32_before_node(graph, node, global_input_name_dict) insert_cast16_after_node(graph, node, global_input_name_dict) # Todo: global_input_name_dict still not fill value def insert_cast32_before_node( graph: onnx_proto.GraphProto, node: onnx_proto.NodeProto, global_input_name_dict ): for i, input_name in enumerate(node.input): for value_info in itertools.chain(graph.value_info, graph.input): if input_name == value_info.name: if ( value_info.type.tensor_type.elem_type != onnx_proto.TensorProto.FLOAT16 ): break 
cast_output_name = node.name + "_input_cast_" + str(i) add_new_value_info( graph, value_info, cast_output_name, onnx_proto.TensorProto.FLOAT ) cast_node_name = node.name + "_input_cast" + str(i) add_cast_node( graph, [input_name], [cast_output_name], cast_node_name, onnx_proto.TensorProto.FLOAT, ) node.input[i] = cast_output_name break # Todo: global_input_name_dict still not fill value def insert_cast16_after_node( graph: onnx_proto.GraphProto, node: onnx_proto.NodeProto, global_input_name_dict ): for i, output_name in enumerate(node.output): for value_info in itertools.chain(graph.value_info, graph.output): if output_name == value_info.name: if ( value_info.type.tensor_type.elem_type != onnx_proto.TensorProto.FLOAT ): break cast_input_name = node.name + "_output_cast_" + str(i) add_new_value_info( graph, value_info, cast_input_name, onnx_proto.TensorProto.FLOAT ) value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16 cast_node_name = node.name + "_output_cast" + str(i) add_cast_node( graph, [cast_input_name], [output_name], cast_node_name, onnx_proto.TensorProto.FLOAT16, ) node.output[i] = cast_input_name break # Process tensor data in attribute of the node def process_tensor_in_node( graph: onnx_proto.GraphProto, op_block_list: list, node_block_list: list, min_positive_val, max_finite_val, ): value_info_block_list = set() # This is for later use, not in this step for node in graph.node: # NOTE: "Cast" operation cannot change its output type because it is strongly typed. 
if ( (node.op_type in op_block_list) or (node.name in node_block_list) or (node.op_type == "Cast") ): # if (node.op_type in op_block_list) or (node.name in node_block_list): # Only need to block the output value_info changing for output_name in node.output: value_info_block_list.add(output_name) else: for attr in node.attribute: # one tensor if attr.t.data_type == onnx_proto.TensorProto.FLOAT: attr.t.CopyFrom( convert_tensor_float_to_float16( attr.t, min_positive_val, max_finite_val ) ) # list of tensor for t in attr.tensors: if t.data_type == onnx_proto.TensorProto.FLOAT: t.CopyFrom( convert_tensor_float_to_float16( t, min_positive_val, max_finite_val ) ) return value_info_block_list # Change all the value info type from float32 to float16 if not in block list def process_value_info(graph: onnx_proto.GraphProto, value_info_block_list: list): for value_info in graph.value_info: if value_info.name in value_info_block_list: continue else: if value_info.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT: value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16 # Initializer is 'edge' type, so doesn't have value_info def process_initializers( graph: onnx_proto.GraphProto, op_block_list, node_block_list, min_positive_val, max_finite_val, ): # Find the input of the block node, don't need to change this kind of initializer initializer_block_list = set() for node in graph.node: if (node.op_type in op_block_list) or (node.name in node_block_list): for ( input_name ) in ( node.input ): # some is initializer, some is value_info, can't distinguish but doesn't matter initializer_block_list.add(input_name) # Process initializers for initializer in graph.initializer: if initializer.name not in initializer_block_list: if initializer.data_type == onnx_proto.TensorProto.FLOAT: convert_tensor_float_to_float16( initializer, min_positive_val, max_finite_val ) def get_next_level_graph( graph: onnx_proto.GraphProto, op_block_list: list, node_block_list: list ): 
sub_graph_list = [] for node in graph.node: if node.op_type in op_block_list or node.name in node_block_list: continue for attr in node.attribute: # Check if sub-graph exist if len(attr.g.node) > 0: # single sub-graph sub_graph_list.append(attr.g) for g in attr.graphs: if len(g.node) > 0: # multiple sub-graphs sub_graph_list.append(g) return sub_graph_list def add_cast_node( graph: onnx_proto.GraphProto, inputs: list, outputs: list, node_name: str, to_type: int, ): new_node = [helper.make_node("Cast", inputs, outputs, to=to_type, name=node_name)] graph.node.extend(new_node) def add_new_value_info( graph: onnx_proto.GraphProto, exist_value_info: onnx_proto.ValueInfoProto, name: str, dtype: int, ): new_value_info = graph.value_info.add() new_value_info.CopyFrom(exist_value_info) new_value_info.name = name new_value_info.type.tensor_type.elem_type = dtype # Find the node that has the specified output name def find_upstream_node_by_output_name(graph: onnx_proto.GraphProto, output_name: str): nodes = [] for node in graph.node: if output_name in node.output: nodes.append(node) assert len(nodes) <= 1 # Suppose there is less than one node found return nodes # Find the node that has the specified input name, including in subgraphs def find_downstream_node_by_input_name( graph: onnx_proto.GraphProto, input_name: str, include_subgraphs=True ): nodes = [] # Check nodes in current graph for node in graph.node: if input_name in node.input: nodes.append(node) if not include_subgraphs: continue # Recursively check subgraphs in node attributes for attr in node.attribute: if attr.type == onnx_proto.AttributeProto.GRAPH: # Single subgraph if len(attr.g.node) > 0: nodes.extend(find_downstream_node_by_input_name(attr.g, input_name)) # Multiple subgraphs if attr.type == onnx_proto.AttributeProto.GRAPHS: for g in attr.graphs: if len(g.node) > 0: nodes.extend(find_downstream_node_by_input_name(g, input_name)) return nodes # Remove identity node def remove_identity_node_from_model(model: 
onnx_proto.ModelProto): remove_identity_node_from_graph(model.graph) try: from onnx.shape_inference import infer_shapes func_infer_shape = infer_shapes model = func_infer_shape(model) return model finally: pass # Remove identity node def remove_identity_node_from_graph(graph: onnx_proto.GraphProto): for curr_node in graph.node: if curr_node.op_type == "Identity": for input_name in curr_node.input: upstream_nodes = find_upstream_node_by_output_name(graph, input_name) for u_node in upstream_nodes: if u_node is not None: u_node.output[0] = curr_node.output[0] graph.node.remove(curr_node) def convert_float_to_float16_model_path( model_path, min_positive_val=1e-7, max_finite_val=1e4, keep_io_types=False ): """ Convert tensor float type in the ONNX Model to tensor float16. *It is to fix an issue that infer_shapes func cannot be used to infer >2GB models. *But this function can be applied to all model sizes. :param model_path: ONNX Model path :return: converted ONNX ModelProto object Examples :: #Convert to ONNX ModelProto object and save model binary file: from onnxmltools.utils.float16_converter import convert_float_to_float16_model_path new_onnx_model = convert_float_to_float16_model_path('model.onnx') onnx.save(new_onnx_model, 'new_model.onnx') """ disable_shape_infer = False if pv.Version(onnx.__version__) >= pv.Version("1.8"): try: # infer_shapes_path can be applied to all model sizes from onnx.shape_inference import infer_shapes_path import tempfile import os # shape_infer_model_path should be in the same folder of model_path with tempfile.NamedTemporaryFile( dir=os.path.dirname(model_path) ) as tmpfile: shape_infer_model_path = tmpfile.name infer_shapes_path(model_path, shape_infer_model_path) model = onnx.load(shape_infer_model_path) disable_shape_infer = True finally: pass if not disable_shape_infer: model = onnx.load(model_path) return convert_float_to_float16( model, min_positive_val, max_finite_val, keep_io_types, disable_shape_infer ) def 
remove_unnecessary_cast_node(graph_proto: onnx_proto.GraphProto): # 1. find all cast nodes in the graph cast_node_list = [] input_name_to_cast_node_dict = {} output_name_to_cast_node_dict = {} # using name as key to point to a node. because node object cannot be key name_to_node_dict = {} for node in graph_proto.node: if node.op_type == "Cast": # if node.name not in ["graph_input_cast0", "graph_output_cast0"]: cast_node_list.append(node) name_to_node_dict[node.name] = node for input_name in node.input: input_name_to_cast_node_dict[input_name] = node for output_name in node.output: output_name_to_cast_node_dict[output_name] = node # 2. find upstream and downstream node of the cast node cast_node_upstream_dict = {} # mapping cast node(name) to its upstream node cast_node_downstream_dict = {} # mapping cast node(name) to its downstream node for current_node in graph_proto.node: # find the downstream node(s) for input_name in current_node.input: if input_name in output_name_to_cast_node_dict: # found the downstream node of the cast node, might be multiple cast_node = output_name_to_cast_node_dict[input_name] if cast_node.name not in cast_node_downstream_dict: cast_node_downstream_dict[cast_node.name] = current_node else: # already exists one downstream node, make it a list existing_downstream_nodes = cast_node_downstream_dict[ cast_node.name ] if isinstance(existing_downstream_nodes, list): existing_downstream_nodes.append(current_node) else: # make a list existing_downstream_nodes = [ existing_downstream_nodes, current_node, ] cast_node_downstream_dict[cast_node.name] = ( existing_downstream_nodes ) # find the upstream node for output_name in current_node.output: if output_name in input_name_to_cast_node_dict: # found the upstream node of the cast node, should be unique cast_node = input_name_to_cast_node_dict[output_name] cast_node_upstream_dict[cast_node.name] = current_node # 3. 
remove the cast node which upstream is 'Constant' for cast_node_name, upstream_node in cast_node_upstream_dict.items(): cast_node = name_to_node_dict[cast_node_name] if upstream_node.op_type == "Constant": cast_node_list.remove(cast_node) # 4. find (cast_to_fp16, cast_to_fp32) pairs where --fp32--> cast_to_fp16 --fp16--> cast_to_fp32. remove_candidate = [] name_to_value_info = { value_info.name: value_info for value_info in itertools.chain(graph_proto.value_info, graph_proto.input) } def get_type(name: str) -> Optional[int]: if name in name_to_value_info: return name_to_value_info[name].type else: # `name` has no value info. return None for cast_node_name, downstream_node in cast_node_downstream_dict.items(): cast_node = name_to_node_dict[cast_node_name] if len(cast_node.input) != 1: raise RuntimeError( f"Cast node {cast_node_name} should have only one input, but has {len(cast_node.input)}." ) input_type = get_type(cast_node.input[0]) if input_type != onnx_proto.TensorProto.FLOAT: continue if isinstance(downstream_node, list): for dn in downstream_node: if ( dn.op_type == "Cast" and dn.attribute[0].i == 32 and cast_node.attribute[0].i == 16 and dn in cast_node_list and cast_node in cast_node_list ): remove_candidate.append((cast_node, dn)) else: if ( downstream_node.op_type == "Cast" and cast_node.attribute[0].i == FLOAT16 and downstream_node.attribute[0].i == FLOAT32 and downstream_node in cast_node_list and cast_node in cast_node_list ): remove_candidate.append((cast_node, downstream_node)) # 5. change "upstream --fp32--> cast_to_fp16 --fp16--> cast_to_fp32 --fp32--> downstream" to # "upstream --fp32--> downstream". 
for cast_node_pair in remove_candidate: first_cast_node = cast_node_pair[0] second_cast_node = cast_node_pair[1] upstream_node = cast_node_upstream_dict.get(first_cast_node.name) downstream_node = cast_node_downstream_dict.get(second_cast_node.name) if upstream_node is None and downstream_node is not None: # The upstream_node should be graph input out = first_cast_node.input[0] for i, input_name in enumerate(downstream_node.input): for output_name in second_cast_node.output: if input_name == output_name: # change the input as the upstream node's output downstream_node.input[i] = out elif upstream_node is not None and downstream_node is None: raise ValueError( "The downstream node of the second cast node should be graph output" ) else: # find the upstream node's output to first_cast_node out = None for output_name in upstream_node.output: if output_name == first_cast_node.input[0]: out = output_name break # find the downstream node's input as second_cast_node's output for i, input_name in enumerate(downstream_node.input): for output_name in second_cast_node.output: if input_name == output_name: # change the input as the upstream node's output downstream_node.input[i] = out # 6. 
remove the cast node pair for cast_node_pair in remove_candidate: graph_proto.node.remove(cast_node_pair[0]) graph_proto.node.remove(cast_node_pair[1]) # Check if the model is already converted to float16 def check_if_fp16_ready(graph_proto): # Check graph input and ouput is_value_info_fp16 = False for value_info in itertools.chain( graph_proto.output, graph_proto.input, graph_proto.value_info ): if value_info.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT16: is_value_info_fp16 = True break # Check initializer is_initializer_fp16 = False for initializer in graph_proto.initializer: if initializer.data_type == onnx_proto.TensorProto.FLOAT16: is_initializer_fp16 = True break # Check cast node has_cast_node_fp16 = False for node in graph_proto.node: if node.op_type == "Cast" and node.attribute[0].i == FLOAT16: has_cast_node_fp16 = True break # Any of above flags is True, return True if is_value_info_fp16 or is_initializer_fp16 or has_cast_node_fp16: return True # already converted to float16 else: return False # not converted to float16 yet
transformers.js/scripts/float16.py/0
{ "file_path": "transformers.js/scripts/float16.py", "repo_id": "transformers.js", "token_count": 16340 }
346
/** * @file Definitions of all models available in Transformers.js. * * **Example:** Load and run an `AutoModel`. * * ```javascript * import { AutoModel, AutoTokenizer } from '@huggingface/transformers'; * * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased'); * let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased'); * * let inputs = await tokenizer('I love transformers!'); * let { logits } = await model(inputs); * // Tensor { * // data: Float32Array(183132) [-7.117443084716797, -7.107812881469727, -7.092104911804199, ...] * // dims: (3) [1, 6, 30522], * // type: "float32", * // size: 183132, * // } * ``` * * We also provide other `AutoModel`s (listed below), which you can use in the same way as the Python library. For example: * * **Example:** Load and run an `AutoModelForSeq2SeqLM`. * ```javascript * import { AutoModelForSeq2SeqLM, AutoTokenizer } from '@huggingface/transformers'; * * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/t5-small'); * let model = await AutoModelForSeq2SeqLM.from_pretrained('Xenova/t5-small'); * * let { input_ids } = await tokenizer('translate English to German: I love transformers!'); * let outputs = await model.generate(input_ids); * let decoded = tokenizer.decode(outputs[0], { skip_special_tokens: true }); * // 'Ich liebe Transformatoren!' 
* ``` * * @module models */ import { AutoConfig, getCacheShapes, } from './configs.js'; import { deviceToExecutionProviders, createInferenceSession, isONNXTensor, isONNXProxy, } from './backends/onnx.js'; import { DATA_TYPES, DEFAULT_DEVICE_DTYPE_MAPPING, DEFAULT_DTYPE_SUFFIX_MAPPING, isWebGpuFp16Supported, } from './utils/dtypes.js'; import { Callable, } from './utils/generic.js'; import { mergeArrays, pick, } from './utils/core.js'; import { getModelFile, getModelJSON, MAX_EXTERNAL_DATA_CHUNKS, } from './utils/hub.js'; import { GITHUB_ISSUE_URL, } from './utils/constants.js'; import { LogitsProcessorList, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, SuppressTokensAtBeginLogitsProcessor, WhisperTimeStampLogitsProcessor, NoRepeatNGramLogitsProcessor, RepetitionPenaltyLogitsProcessor, NoBadWordsLogitsProcessor, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ClassifierFreeGuidanceLogitsProcessor, } from './generation/logits_process.js'; import { GenerationConfig, } from './generation/configuration_utils.js'; import { cat, mean, zeros, zeros_like, ones, ones_like, full, full_like, stack, std_mean, Tensor, DataTypeMap, } from './utils/tensor.js'; import { RawImage } from './utils/image.js'; import { dynamic_time_warping, max, medianFilter } from './utils/maths.js'; import { EosTokenCriteria, MaxLengthCriteria, StoppingCriteriaList } from './generation/stopping_criteria.js'; import { LogitsSampler } from './generation/logits_sampler.js'; import { apis, env } from './env.js'; import { WhisperGenerationConfig } from './models/whisper/generation_whisper.js'; import { whisper_language_to_code } from './models/whisper/common_whisper.js'; ////////////////////////////////////////////////// // Model types: used internally const MODEL_TYPES = { EncoderOnly: 0, EncoderDecoder: 1, Seq2Seq: 2, Vision2Seq: 3, DecoderOnly: 4, MaskGeneration: 5, ImageTextToText: 6, Musicgen: 7, MultiModality: 
8,
    Phi3V: 9,
    AudioTextToText: 10,
    AutoEncoder: 11,
    ImageAudioTextToText: 12,
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// Helper functions

// NOTE: These will be populated fully later
const MODEL_TYPE_MAPPING = new Map();
const MODEL_NAME_TO_CLASS_MAPPING = new Map();
const MODEL_CLASS_TO_NAME_MAPPING = new Map();


/**
 * Constructs an InferenceSession using a model file located at the specified path.
 * @param {string} pretrained_model_name_or_path The path to the directory containing the model file.
 * @param {string} fileName The name of the model file.
 * @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the model.
 * @returns {Promise<{buffer_or_path: Uint8Array|string, session_options: Object, session_config: Object}>} A Promise that resolves to the data needed to create an InferenceSession object.
 * @private
 */
async function getSession(pretrained_model_name_or_path, fileName, options) {
    let custom_config = options.config?.['transformers.js_config'] ?? {};

    // Resolve the device: an explicit option wins over the model's custom config.
    // A per-file mapping ({ fileName: device }) is also supported.
    let device = options.device ?? custom_config.device;
    if (device && typeof device !== 'string') {
        if (device.hasOwnProperty(fileName)) {
            device = device[fileName];
        } else {
            console.warn(`device not specified for "${fileName}". Using the default device.`);
            device = null;
        }
    }

    // If the device is not specified, we use the default (supported) execution providers.
    const selectedDevice = /** @type {import("./utils/devices.js").DeviceType} */(
        device ?? (apis.IS_NODE_ENV ? 'cpu' : 'wasm')
    );
    const executionProviders = deviceToExecutionProviders(selectedDevice);

    // Update custom config with the selected device's config, if it exists
    const device_config = custom_config.device_config ?? {};
    if (device_config.hasOwnProperty(selectedDevice)) {
        custom_config = {
            ...custom_config,
            ...device_config[selectedDevice],
        };
    }

    // If options.dtype is specified, we use it to choose the suffix for the model file.
    // Otherwise, we use the default dtype for the device.
    let dtype = options.dtype ?? custom_config.dtype;
    if (typeof dtype !== 'string') {
        // dtype may be a per-file mapping ({ fileName: dtype }).
        if (dtype && dtype.hasOwnProperty(fileName)) {
            dtype = dtype[fileName];
        } else {
            dtype = DEFAULT_DEVICE_DTYPE_MAPPING[selectedDevice] ?? DATA_TYPES.fp32;
            console.warn(`dtype not specified for "${fileName}". Using the default dtype (${dtype}) for this device (${selectedDevice}).`);
        }
    }

    if (dtype === DATA_TYPES.auto) {
        // Try to choose the auto dtype based on the custom config
        let config_dtype = custom_config.dtype;
        if (typeof config_dtype !== 'string') {
            config_dtype = config_dtype?.[fileName];
        }

        if (config_dtype && config_dtype !== DATA_TYPES.auto && DATA_TYPES.hasOwnProperty(config_dtype)) {
            // Defined by the config, and is not "auto"
            dtype = config_dtype;
        } else {
            // Choose default dtype based on device, falling back to fp32
            dtype = DEFAULT_DEVICE_DTYPE_MAPPING[selectedDevice] ?? DATA_TYPES.fp32;
        }
    }

    const selectedDtype = /** @type {import("./utils/dtypes.js").DataType} */(dtype);

    if (!DEFAULT_DTYPE_SUFFIX_MAPPING.hasOwnProperty(selectedDtype)) {
        throw new Error(`Invalid dtype: ${selectedDtype}. Should be one of: ${Object.keys(DATA_TYPES).join(', ')}`);
    } else if (selectedDtype === DATA_TYPES.fp16 && selectedDevice === 'webgpu' && !(await isWebGpuFp16Supported())) {
        throw new Error(`The device (${selectedDevice}) does not support fp16.`);
    }

    // Only valid for models with a decoder
    const kv_cache_dtype_config = custom_config.kv_cache_dtype;
    const kv_cache_dtype = kv_cache_dtype_config
        ? (typeof kv_cache_dtype_config === 'string'
            ? kv_cache_dtype_config
            : kv_cache_dtype_config[selectedDtype] ?? 'float32')
        : undefined;

    if (kv_cache_dtype && !['float32', 'float16'].includes(kv_cache_dtype)) {
        throw new Error(`Invalid kv_cache_dtype: ${kv_cache_dtype}. Should be one of: float32, float16`);
    }

    const session_config = {
        dtype: selectedDtype,
        kv_cache_dtype,
        device: selectedDevice,
    }

    // Construct the model file name
    const suffix = DEFAULT_DTYPE_SUFFIX_MAPPING[selectedDtype];
    const baseName = `${fileName}${suffix}.onnx`;
    const modelFileName = `${options.subfolder ?? ''}/${baseName}`;

    const session_options = { ...options.session_options };

    // Overwrite `executionProviders` if not specified
    session_options.executionProviders ??= executionProviders;

    // Overwrite `freeDimensionOverrides` if specified in config and not set in session options
    const free_dimension_overrides = custom_config.free_dimension_overrides;
    if (free_dimension_overrides) {
        session_options.freeDimensionOverrides ??= free_dimension_overrides;
    } else if (selectedDevice.startsWith('webnn') && !session_options.freeDimensionOverrides) {
        console.warn(
            `WebNN does not currently support dynamic shapes and requires 'free_dimension_overrides' to be set in config.json, preferably as a field within config["transformers.js_config"]["device_config"]["${selectedDevice}"]. ` +
            `When 'free_dimension_overrides' is not set, you may experience significant performance degradation.`
        );
    }

    // Kick off the model download immediately; it is awaited at the end so the
    // external-data downloads below can run concurrently with it.
    const return_path = apis.IS_NODE_ENV && env.useFSCache;
    const bufferOrPathPromise = getModelFile(pretrained_model_name_or_path, modelFileName, true, options, return_path);

    // Handle onnx external data files
    const use_external_data_format = options.use_external_data_format ?? custom_config.use_external_data_format;
    /** @type {Promise<string|{path: string, data: Uint8Array}>[]} */
    let externalDataPromises = [];
    if (use_external_data_format) {
        // use_external_data_format may be a boolean, a chunk count, or a
        // per-file mapping of either (keyed by baseName or fileName).
        let external_data_format;
        if (typeof use_external_data_format === 'object') {
            if (use_external_data_format.hasOwnProperty(baseName)) {
                external_data_format = use_external_data_format[baseName];
            } else if (use_external_data_format.hasOwnProperty(fileName)) {
                external_data_format = use_external_data_format[fileName];
            } else {
                external_data_format = false;
            }
        } else {
            external_data_format = use_external_data_format;
        }

        const num_chunks = +external_data_format; // (false=0, true=1, number remains the same)
        if (num_chunks > MAX_EXTERNAL_DATA_CHUNKS) {
            throw new Error(`The number of external data chunks (${num_chunks}) exceeds the maximum allowed value (${MAX_EXTERNAL_DATA_CHUNKS}).`);
        }
        for (let i = 0; i < num_chunks; ++i) {
            // First chunk has no numeric suffix: "<base>_data", then "<base>_data_1", ...
            const path = `${baseName}_data${i === 0 ? '' : '_' + i}`;
            const fullPath = `${options.subfolder ?? ''}/${path}`;
            externalDataPromises.push(new Promise(async (resolve, reject) => {
                const data = await getModelFile(pretrained_model_name_or_path, fullPath, true, options, return_path);
                resolve(data instanceof Uint8Array ? { path, data } : path);
            }));
        }
    } else if (session_options.externalData !== undefined) {
        externalDataPromises = session_options.externalData.map(async (ext) => {
            // if the external data is a string, fetch the file and replace the string with its content
            // @ts-expect-error TS2339
            if (typeof ext.data === "string") {
                // @ts-expect-error TS2339
                const ext_buffer = await getModelFile(pretrained_model_name_or_path, ext.data, true, options);
                // @ts-expect-error TS2698
                return { ...ext, data: ext_buffer };
            }
            return ext;
        });
    }

    if (externalDataPromises.length > 0) {
        const externalData = await Promise.all(externalDataPromises);
        // NOTE: in Node the session loads external data from disk paths, so
        // the resolved buffers are only attached for non-Node environments.
        if (!apis.IS_NODE_ENV) {
            session_options.externalData = externalData;
        }
    }

    if (selectedDevice === 'webgpu') {
        const shapes = getCacheShapes(options.config, {
            prefix: 'present',
        });
        if (Object.keys(shapes).length > 0 && !isONNXProxy()) {
            // Only set preferredOutputLocation if shapes are present and we aren't proxying ONNX
            /** @type {Record<string, import('onnxruntime-common').Tensor.DataLocation>} */
            const preferredOutputLocation = {};
            for (const key in shapes) {
                preferredOutputLocation[key] = 'gpu-buffer';
            }
            session_options.preferredOutputLocation = preferredOutputLocation;
        }
    }

    const buffer_or_path = await bufferOrPathPromise;

    return { buffer_or_path, session_options, session_config };
}

/**
 * Helper function to create multiple InferenceSession objects.
 *
 * @param {string} pretrained_model_name_or_path The path to the directory containing the model file.
 * @param {Record<string, string>} names The names of the model files to load.
 * @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the model.
 * @returns {Promise<Record<string, any>>} A Promise that resolves to a dictionary of InferenceSession objects.
* @private
 */
async function constructSessions(pretrained_model_name_or_path, names, options) {
    // All sessions are resolved in parallel and keyed by their logical name.
    return Object.fromEntries(await Promise.all(
        Object.keys(names).map(async (name) => {
            const { buffer_or_path, session_options, session_config } = await getSession(pretrained_model_name_or_path, names[name], options);
            const session = await createInferenceSession(buffer_or_path, session_options, session_config);
            return [name, session];
        })
    ));
}

/**
 * Helper function to load multiple optional configuration files
 * @param {string} pretrained_model_name_or_path The path to the directory containing the config file.
 * @param {Record<string, string>} names The names of the config files to load.
 * @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the configs.
 * @returns {Promise<Record<string, any>>} A Promise that resolves to a dictionary of configuration objects.
 * @private
 */
async function getOptionalConfigs(pretrained_model_name_or_path, names, options) {
    // Files are fetched with fatal=false, so a missing config resolves to null.
    return Object.fromEntries(await Promise.all(
        Object.keys(names).map(async (name) => {
            const config = await getModelJSON(pretrained_model_name_or_path, names[name], false, options);
            return [name, config];
        })
    ));
}

/**
 * Validate model inputs
 * @param {Object} session The InferenceSession object that will be run.
 * @param {Object} inputs The inputs to check.
 * @returns {Record<string, Tensor>} The checked inputs.
 * @throws {Error} If any inputs are missing.
 * @private
 */
function validateInputs(session, inputs) {
    /**
     * NOTE: Create either a shallow or deep copy based on `onnx.wasm.proxy`
     * @type {Record<string, Tensor>}
     */
    const checkedInputs = Object.create(null);
    const missingInputs = [];
    for (const inputName of session.inputNames) {
        const tensor = inputs[inputName];
        // Rare case where one of the model's input names corresponds to a built-in
        // object name (e.g., toString), which would cause a simple (!tensor) check to fail,
        // because it's not undefined but a function.
        if (!(tensor instanceof Tensor)) {
            missingInputs.push(inputName);
            continue;
        }
        // NOTE: When `env.wasm.proxy is true` the tensor is moved across the Worker
        // boundary, transferring ownership to the worker and invalidating the tensor.
        // So, in this case, we simply sacrifice a clone for it.
        checkedInputs[inputName] = isONNXProxy() ? tensor.clone() : tensor;
    }
    if (missingInputs.length > 0) {
        // NOTE(review): the message opens a double quote that is never closed
        // -- confirm whether that is intentional.
        throw new Error(
            `An error occurred during model execution: "Missing the following inputs: ${missingInputs.join(', ')}.`);
    }

    const numInputsProvided = Object.keys(inputs).length;
    const numInputsNeeded = session.inputNames.length;
    if (numInputsProvided > numInputsNeeded) {
        // No missing inputs, but too many inputs were provided.
        // Warn the user and ignore the extra inputs.
        let ignored = Object.keys(inputs).filter(inputName => !session.inputNames.includes(inputName));
        console.warn(`WARNING: Too many inputs were provided (${numInputsProvided} > ${numInputsNeeded}). The following inputs will be ignored: "${ignored.join(', ')}".`);
    }
    return checkedInputs;
}

// Currently, Transformers.js doesn't support simultaneous execution of sessions in WASM/WebGPU.
// For this reason, we need to chain the inference calls (otherwise we get "Error: Session already started").
let webInferenceChain = Promise.resolve();

/**
 * Executes an InferenceSession using the specified inputs.
 * NOTE: `inputs` must contain at least the input names of the model.
 *  - If additional inputs are passed, they will be ignored.
 *  - If inputs are missing, an error will be thrown.
 *
 * @param {Object} session The InferenceSession object to run.
 * @param {Object} inputs An object that maps input names to input tensors.
 * @returns {Promise<Object>} A Promise that resolves to an object that maps output names to output tensors.
* @private
 */
async function sessionRun(session, inputs) {
    const checkedInputs = validateInputs(session, inputs);
    try {
        // pass the original ort tensor
        const ortFeed = Object.fromEntries(Object.entries(checkedInputs).map(([k, v]) => [k, v.ort_tensor]));
        const run = () => session.run(ortFeed);
        // In browser/worker environments the call is serialized through
        // webInferenceChain (see note above); elsewhere it runs immediately.
        const output = await ((apis.IS_BROWSER_ENV || apis.IS_WEBWORKER_ENV)
            ? (webInferenceChain = webInferenceChain.then(run))
            : run());
        return replaceTensors(output);
    } catch (e) {
        // Error messages can be long (nested) and uninformative. For this reason,
        // we apply minor formatting to show the most important information
        const formatted = Object.fromEntries(Object.entries(checkedInputs)
            .map(([k, tensor]) => {
                // Extract these properties from the underlying ORT tensor
                const unpacked = {
                    type: tensor.type,
                    dims: tensor.dims,
                    location: tensor.location,
                }
                if (unpacked.location !== "gpu-buffer") {
                    // Only return the data if it's not a GPU buffer
                    unpacked.data = tensor.data;
                }
                return [k, unpacked];
            }));

        // This usually occurs when the inputs are of the wrong type.
        console.error(`An error occurred during model execution: "${e}".`);
        console.error('Inputs given to model:', formatted);
        throw e;
    }
}

/**
 * Replaces ONNX Tensor objects with custom Tensor objects to support additional functions.
 * NOTE: mutates `obj` in place (recursively) and returns the same object.
 * @param {Object} obj The object to replace tensor objects in.
 * @returns {Object} The object with tensor objects replaced by custom Tensor objects.
 * @private
 */
function replaceTensors(obj) {
    for (let prop in obj) {
        if (isONNXTensor(obj[prop])) {
            obj[prop] = new Tensor(obj[prop]);
        } else if (typeof obj[prop] === 'object') {
            // Recurse into nested objects (e.g. grouped outputs).
            replaceTensors(obj[prop]);
        }
    }
    return obj;
}


/**
 * Converts an array or Tensor of integers to an int64 Tensor.
 * @param {any[]|Tensor} items The input integers to be converted.
 * @returns {Tensor} The int64 Tensor with the converted values.
 * @throws {Error} If the input array is empty or the input is a batched Tensor and not all sequences have the same length.
 * @private
 */
function toI64Tensor(items) {
    if (items instanceof Tensor) {
        return items;
    }
    // items is an array
    if (items.length === 0) {
        throw Error("items must be non-empty");
    }

    if (Array.isArray(items[0])) {
        // batched
        if (items.some(x => x.length !== items[0].length)) {
            throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' and/or 'truncation=True' to have batched tensors with the same length.")
        }

        return new Tensor('int64',
            BigInt64Array.from(items.flat().map(x => BigInt(x))),
            [items.length, items[0].length]
        );
    } else {
        //flat: a single sequence becomes a [1, n] batch
        return new Tensor('int64',
            BigInt64Array.from(items.map(x => BigInt(x))),
            [1, items.length]
        );
    }
}

/**
 * Creates a boolean tensor with a single value.
 * @param {boolean} value The value of the tensor.
 * @returns {Tensor} The boolean tensor.
 * @private
 */
function boolTensor(value) {
    return new Tensor('bool', [value], [1]);
}

// JS doesn't support mixins, so we define some reused functions here, and allow "this" to be passed in

/**
 * Perform forward pass on the seq2seq model (both encoder and decoder).
 * @param {Object} self The seq2seq model object.
 * @param {Object} model_inputs The input object for the model containing encoder and decoder inputs.
 * @returns {Promise<Seq2SeqLMOutput>} Promise that resolves with the output of the seq2seq model.
 * @private
 */
async function seq2seqForward(self, model_inputs) {

    let { encoder_outputs, input_ids, decoder_input_ids, ...other_decoder_inputs } = model_inputs;
    // Encode if needed
    if (!encoder_outputs) {
        const encoder_inputs = pick(model_inputs, self.sessions['model'].inputNames);
        // Encoder outputs are not given, so we must compute them.
encoder_outputs = (await encoderForward(self, encoder_inputs)).last_hidden_state;
    }
    other_decoder_inputs.input_ids = decoder_input_ids;
    other_decoder_inputs.encoder_hidden_states = encoder_outputs;

    if (self.sessions['decoder_model_merged'].inputNames.includes('encoder_attention_mask')) {
        other_decoder_inputs.encoder_attention_mask = model_inputs.attention_mask
    }

    const decoderResults = await decoderForward(self, other_decoder_inputs, true);
    return decoderResults;
}

/**
 * Forward pass of an encoder model.
 * @param {Object} self The encoder model.
 * @param {Object} model_inputs The input data to be used for the forward pass.
 * @returns {Promise<Object>} The model's outputs.
 * @private
 */
async function encoderForward(self, model_inputs) {
    const session = self.sessions['model'];
    const encoderFeeds = pick(model_inputs, session.inputNames);

    if (session.inputNames.includes('inputs_embeds') && !encoderFeeds.inputs_embeds) {
        if (!model_inputs.input_ids) {
            throw new Error('Both `input_ids` and `inputs_embeds` are missing in the model inputs.');
        }
        // Compute text embeddings from the token ids when the model expects
        // embeddings but only ids were provided.
        encoderFeeds.inputs_embeds = await self.encode_text({ input_ids: model_inputs.input_ids });
    }
    if (session.inputNames.includes('token_type_ids') && !encoderFeeds.token_type_ids) {
        if (!encoderFeeds.input_ids) {
            throw new Error('Both `input_ids` and `token_type_ids` are missing in the model inputs.');
        }
        // Assign default `token_type_ids` (all zeroes) to the `encoderFeeds` if the model expects it,
        // but they weren't created by the tokenizer.
        encoderFeeds.token_type_ids = zeros_like(encoderFeeds.input_ids);
    }
    if (session.inputNames.includes('pixel_mask') && !encoderFeeds.pixel_mask) {
        if (!encoderFeeds.pixel_values) {
            throw new Error('Both `pixel_values` and `pixel_mask` are missing in the model inputs.');
        }
        // Assign default `pixel_mask` (all ones) to the `encoderFeeds` if the model expects it,
        // but they weren't created by the processor.
        // NOTE(review): assumes pixel_values is (batch, channels, height, width)
        // -- the mask drops the channel dimension.
        const dims = encoderFeeds.pixel_values.dims;
        encoderFeeds.pixel_mask = ones([dims[0], dims[2], dims[3]]);
    }

    return await sessionRun(session, encoderFeeds);
}

// Runs the encoder then the decoder of an auto-encoder model and returns the
// decoder output only.
async function autoEncoderForward(self, model_inputs) {
    const encoded = await self.encode(model_inputs);
    const decoded = await self.decode(encoded);
    return decoded;
}

/**
 * Forward pass of a decoder model.
 * @param {Object} self The decoder model.
 * @param {Object} model_inputs The input data to be used for the forward pass.
 * @returns {Promise<Object>} The logits and past key values.
 * @private
 */
async function decoderForward(self, model_inputs, is_encoder_decoder = false) {
    const session = self.sessions[
        is_encoder_decoder ? 'decoder_model_merged' : 'model'
    ]

    const { past_key_values, ...new_model_inputs } = model_inputs;

    if (session.inputNames.includes('use_cache_branch')) {
        new_model_inputs.use_cache_branch = boolTensor(!!past_key_values);
    }
    if (session.inputNames.includes('position_ids') && new_model_inputs.attention_mask && !new_model_inputs.position_ids) {
        // NOTE: Handle a special case for paligemma/gemma3 models, where positions are 1-indexed
        const start_index = ['paligemma', 'gemma3_text', 'gemma3'].includes(self.config.model_type) ?
1 : 0;
        new_model_inputs.position_ids = createPositionIds(new_model_inputs, past_key_values, start_index);
    }

    // Unpack the `past_key_values` object into model inputs
    self.addPastKeyValues(new_model_inputs, past_key_values);

    // Select only the inputs that are needed for the current session
    const fixed = pick(new_model_inputs, session.inputNames);
    return await sessionRun(session, fixed);
}

// Scatters modality features (image/audio embeddings) into the text embedding
// sequence at the positions of the corresponding placeholder tokens.
function default_merge_input_ids_with_features({
    modality_token_id,
    inputs_embeds,
    modality_features,
    input_ids,
    attention_mask,
}) {
    // Per-batch-row indices of the placeholder tokens to overwrite.
    const token_positions = input_ids.tolist().map(ids =>
        ids.reduce((acc, x, idx) => {
            if (x == modality_token_id) acc.push(idx);
            return acc;
        }, [])
    );
    const n_tokens = token_positions.reduce((acc, x) => acc + x.length, 0);
    const n_features = modality_features.dims[0];
    if (n_tokens !== n_features) {
        throw new Error(`Number of tokens and features do not match: tokens: ${n_tokens}, features ${n_features}`);
    }

    // Equivalent to performing a masked_scatter
    let img = 0;
    for (let i = 0; i < token_positions.length; ++i) {
        const tokens = token_positions[i];
        const embeds = inputs_embeds[i];
        for (let j = 0; j < tokens.length; ++j) {
            embeds[tokens[j]].data.set(modality_features[img++].data)
        }
    }
    return { inputs_embeds, attention_mask }
}

// Image specialization of default_merge_input_ids_with_features.
function default_merge_input_ids_with_image_features({
    image_token_id,
    inputs_embeds,
    image_features,
    input_ids,
    attention_mask,
}) {
    return default_merge_input_ids_with_features({
        modality_token_id: image_token_id,
        inputs_embeds,
        modality_features: image_features,
        input_ids,
        attention_mask,
    })
}

// Audio specialization of default_merge_input_ids_with_features.
function default_merge_input_ids_with_audio_features({
    audio_token_id,
    inputs_embeds,
    audio_features,
    input_ids,
    attention_mask,
}) {
    return default_merge_input_ids_with_features({
        modality_token_id: audio_token_id,
        inputs_embeds,
        modality_features: audio_features,
        input_ids,
        attention_mask,
    })
}

/**
 * Abstract forward pass function for image-text-to-text or audio-text-to-text models.
 * @param {Object} self The model object.
 * @param {Object} params Additional parameters.
 * @param {Function} [params.encode_function] The function to encode the modality values.
 * @param {Function} [params.merge_function] The function to merge the modality features with the input embeddings.
 * @param {string} [params.modality_input_name] The modality input name.
 * @param {string} [params.modality_output_name] The modality output name.
 * @param {Tensor} [params.input_ids=null]
 * @param {Tensor} [params.attention_mask=null]
 * @param {Tensor} [params.position_ids=null]
 * @param {Tensor} [params.inputs_embeds=null]
 * @param {Tensor} [params.past_key_values=null]
 * @param {Object} [params.generation_config=null]
 * @param {Object} [params.logits_processor=null]
 * @returns {Promise<Tensor>} The model's output tensor
 * @private
 */
async function genericTextToTextForward(self, {
    // Generic parameters:
    encode_function,
    merge_function,
    modality_input_name,
    modality_output_name,

    // Produced by the tokenizer/processor:
    input_ids = null,
    attention_mask = null,

    // Used during generation:
    position_ids = null,
    inputs_embeds = null,
    past_key_values = null,

    // Generic generation parameters
    generation_config = null,
    logits_processor = null,

    // Additional parameters
    ...kwargs
}) {
    const modality_values = kwargs[modality_input_name];
    if (!inputs_embeds) {
        // 1. Extract the text embeddings.
        inputs_embeds = await self.encode_text({ input_ids, ...kwargs });

        // 2. Possibly, merge text and modality values
        if (modality_values && input_ids.dims[1] !== 1) {
            const modality_features = await encode_function({
                // Pass the modality values under its expected key.
                // The caller knows whether this is audio or image.
                [modality_input_name]: modality_values,
                ...kwargs
            });
            ({ inputs_embeds, attention_mask } = merge_function({
                [modality_output_name]: modality_features,
                inputs_embeds,
                input_ids,
                attention_mask,
            }));
        } else if (past_key_values && modality_values && input_ids.dims[1] === 1) {
            // This branch handles the cache case.
const target_length = input_ids.dims[1]; // always 1 const past_length = Object.values(past_key_values)[0].dims.at(-2); attention_mask = cat([ ones([input_ids.dims[0], past_length]), attention_mask.slice(null, [attention_mask.dims[1] - target_length, attention_mask.dims[1]]), ], 1); } } if (!position_ids) { if (self.config.model_type === 'qwen2_vl') { // Special case for qwen2_vl models // @ts-ignore const { image_grid_thw, video_grid_thw } = kwargs; [position_ids] = self.get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask) } } // 3. Call the decoder forward using the updated inputs. const outputs = await decoderForward(self, { inputs_embeds, past_key_values, attention_mask, position_ids, generation_config, logits_processor, }, true); return outputs; } /** * Forward pass of an audio-text-to-text model. * @param {Object} self The audio-text-to-text model. * @param {Object} params The inputs for the audio-text-to-text forward pass. * @returns {Promise<Tensor>} The model's output tensor. * @private */ async function audioTextToTextForward(self, params) { return await genericTextToTextForward(self, { ...params, modality_input_name: 'audio_values', modality_output_name: 'audio_features', encode_function: self.encode_audio.bind(self), merge_function: self._merge_input_ids_with_audio_features.bind(self), }); } /** * Forward pass of an image-text-to-text model. * @param {Object} self The image-text-to-text model. * @param {Object} params The inputs for the image-text-to-text forward pass. * @returns {Promise<Tensor>} The model's output tensor. 
 * @private
 */
async function imageTextToTextForward(self, params) {
    return await genericTextToTextForward(self, {
        ...params,
        modality_input_name: 'pixel_values',
        modality_output_name: 'image_features',
        encode_function: self.encode_image.bind(self),
        merge_function: self._merge_input_ids_with_image_features.bind(self),
    });
}

/**
 * Helper function to perform the following:
 * ```python
 * x = attention_mask.long().cumsum(-1) - 1
 * x.masked_fill_(attention_mask == 0, 1)
 * ```
 * i.e. position ids count attended positions; padded (mask == 0) positions get 1.
 * @param {Tensor} attention_mask
 * @returns {{data: BigInt64Array, dims: number[]}}
 */
function cumsum_masked_fill(attention_mask, start_index = 0) {
    const [bz, seq_len] = attention_mask.dims;
    const attn_mask_data = attention_mask.data;

    const data = new BigInt64Array(attn_mask_data.length);
    for (let i = 0; i < bz; ++i) {
        const start = i * seq_len;
        // Running position counter for this batch row, starting at `start_index`.
        let sum = BigInt(start_index);
        for (let j = 0; j < seq_len; ++j) {
            const index = start + j;
            if (attn_mask_data[index] === 0n) {
                data[index] = BigInt(1);
            } else { // === 1n
                data[index] = sum;
                sum += attn_mask_data[index];
            }
        }
    }
    return { data, dims: attention_mask.dims };
}

/**
 * If the model supports providing position_ids, we create position_ids on the fly for batch generation,
 * by computing the cumulative sum of the attention mask along the sequence length dimension.
 *
 * Equivalent to:
 * ```python
 * position_ids = attention_mask.long().cumsum(-1) - 1
 * position_ids.masked_fill_(attention_mask == 0, 1)
 * if past_key_values:
 *     position_ids = position_ids[:, -input_ids.shape[1] :]
 * ```
 */
function createPositionIds(model_inputs, past_key_values = null, start_index = 0) {
    const { input_ids, inputs_embeds, attention_mask } = model_inputs;

    const { data, dims } = cumsum_masked_fill(attention_mask, start_index);
    let position_ids = new Tensor('int64', data, dims);
    if (past_key_values) {
        // With a cache, only keep the positions of the newly provided tokens;
        // length is taken from input_ids, or inputs_embeds when ids are absent.
        const offset = -(input_ids ?? inputs_embeds).dims.at(1);
        position_ids = position_ids.slice(null, [offset, null]);
    }
    return position_ids;
}

function decoder_prepare_inputs_for_generation(self, input_ids, model_inputs, generation_config) {
    // Number of positions already stored in the KV cache (0 on the first step).
    const past_length = model_inputs.past_key_values
        ? Object.values(model_inputs.past_key_values)[0].dims.at(-2)
        : 0;

    if (!model_inputs.attention_mask) {
        // If the attention mask is not provided, we attempt to infer based on provided inputs
        let dims;
        for (const key of ['input_ids', 'inputs_embeds', 'position_ids']) {
            if (model_inputs[key]) {
                dims = model_inputs[key].dims;
                break;
            }
        }
        if (!dims) {
            throw new Error("attention_mask is not provided, and unable to infer its shape from model inputs.");
        }
        model_inputs.attention_mask = ones([dims[0], past_length + dims[1]]);
    }

    if (model_inputs.past_key_values) {
        const { input_ids, attention_mask } = model_inputs;

        // Keep only the unprocessed tokens:
        // 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
        // some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
        // input)
        if (attention_mask && attention_mask.dims[1] > input_ids.dims[1]) {
            // NOTE: not needed since we only pass the generated tokens to the next forward pass
            // const offset = -(attention_mask.dims[1] - past_length);
            // model_inputs.input_ids = input_ids.slice(null, [offset, null]);
        }
        // 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens.
        // We can discard input_ids based on the past_length.
        else if (past_length < input_ids.dims[1]) {
            // NOTE: Required for phi models.
            // See https://github.com/huggingface/transformers/issues/30809#issuecomment-2111918479 for more information.
            model_inputs.input_ids = input_ids.slice(null, [past_length, null]);
        }
        // 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
        else { }
    }

    return model_inputs;
}

function encoder_decoder_prepare_inputs_for_generation(self, input_ids, model_inputs, generation_config) {
    if (model_inputs.past_key_values) {
        // With a cache, the decoder only needs the most recently generated token per row.
        input_ids = input_ids.map(x => [x.at(-1)]);
    }

    return {
        ...model_inputs,
        decoder_input_ids: toI64Tensor(input_ids),
    };
}

// Dispatches to the encoder-decoder or decoder-only preparation path based on config.
function multimodal_text_to_text_prepare_inputs_for_generation(self, ...args) {
    if (self.config.is_encoder_decoder) {
        return encoder_decoder_prepare_inputs_for_generation(self, ...args);
    } else {
        return decoder_prepare_inputs_for_generation(self, ...args);
    }
}

function multimodality_prepare_inputs_for_generation(self, input_ids, model_inputs, generation_config) {
    const has_past_key_values = !!model_inputs.past_key_values;

    // Classifier-free guidance: duplicate the batch (conditional + unconditional rows).
    if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) {
        if (has_past_key_values) {
            model_inputs.input_ids = cat([
                model_inputs.input_ids,
                model_inputs.input_ids,
            ], 0)
            // NOTE: attention_mask handled in generation
        } else {
            model_inputs.input_ids = cat([
                model_inputs.input_ids,
                full_like(model_inputs.input_ids, BigInt(generation_config.pad_token_id)),
            ], 0);
            model_inputs.attention_mask = cat([
                model_inputs.attention_mask,
                full_like(model_inputs.attention_mask, 0n),
            ], 0);
        }
    }

    if (has_past_key_values || !model_inputs.pixel_values) {
        // Decoding steps (and text-only calls) still require a pixel_values input;
        // provide an empty placeholder tensor.
        model_inputs.pixel_values = full([0, 0, 3, 384, 384], 1.0);
    }

    if (has_past_key_values) {
        const num_img_tokens = 0;
        const num_text_tokens = 1;
        const has_image = num_img_tokens > 0 ?
1 : 0;

        const batch_size = 1;
        // One new text token, no image tokens: mask out text positions in images_seq_mask.
        model_inputs.images_seq_mask = new Tensor(
            'bool',
            new Array(num_img_tokens + num_text_tokens).fill(true).fill(false, 0, num_text_tokens),
            [batch_size, num_img_tokens + num_text_tokens],
        );
        model_inputs.images_emb_mask = new Tensor(
            'bool',
            new Array(num_img_tokens).fill(!!has_image),
            [batch_size, 1, num_img_tokens],
        );
    }
    return model_inputs;
}

//////////////////////////////////////////////////

//////////////////////////////////////////////////
/**
 * A base class for pre-trained models that provides the model configuration and an ONNX session.
 */
export class PreTrainedModel extends Callable {
    main_input_name = 'input_ids';
    forward_params = ['input_ids', 'attention_mask'];

    /**
     * Creates a new instance of the `PreTrainedModel` class.
     * @param {import('./configs.js').PretrainedConfig} config The model configuration.
     * @param {Record<string, any>} sessions The inference sessions for the model.
     * @param {Record<string, Object>} configs Additional configuration files (e.g., generation_config.json).
     */
    constructor(config, sessions, configs) {
        super();

        this.config = config;
        this.sessions = sessions;
        this.configs = configs;

        const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this.constructor);
        const modelType = MODEL_TYPE_MAPPING.get(modelName);

        // Select the forward / input-preparation strategy from the model's
        // registered architecture type. `can_generate` additionally gates
        // `.generate()` support (see `_validate_model_class`).
        this.can_generate = false;
        this._forward = null;

        this._prepare_inputs_for_generation = null;
        switch (modelType) {
            case MODEL_TYPES.DecoderOnly:
                this.can_generate = true;
                this._forward = decoderForward;
                this._prepare_inputs_for_generation = decoder_prepare_inputs_for_generation;
                break;
            case MODEL_TYPES.Seq2Seq:
            case MODEL_TYPES.Vision2Seq:
            case MODEL_TYPES.Musicgen:
                this.can_generate = true;

                this._forward = seq2seqForward;
                this._prepare_inputs_for_generation = encoder_decoder_prepare_inputs_for_generation;
                break;

            case MODEL_TYPES.EncoderDecoder:
                this._forward = seq2seqForward;
                break;
            case MODEL_TYPES.ImageTextToText:
                this.can_generate = true;
                this._forward = imageTextToTextForward;
                this._prepare_inputs_for_generation = multimodal_text_to_text_prepare_inputs_for_generation;
                break;
            case MODEL_TYPES.AudioTextToText:
                this.can_generate = true;
                this._forward = audioTextToTextForward;
                this._prepare_inputs_for_generation = multimodal_text_to_text_prepare_inputs_for_generation;
                break;
            case MODEL_TYPES.Phi3V:
            case MODEL_TYPES.ImageAudioTextToText:
                // NOTE: these types set no `_forward` here — presumably the
                // subclass overrides `forward`/`_forward`; verify against subclasses.
                this.can_generate = true;
                this._prepare_inputs_for_generation = multimodal_text_to_text_prepare_inputs_for_generation;
                break;

            case MODEL_TYPES.MultiModality:
                this.can_generate = true;
                this._prepare_inputs_for_generation = multimodality_prepare_inputs_for_generation;
                break;
            case MODEL_TYPES.AutoEncoder:
                this._forward = autoEncoderForward;
                break;
            default:
                // should be MODEL_TYPES.EncoderOnly
                this._forward = encoderForward;
                break;
        }

        if (this.can_generate) {
            this.forward_params.push('past_key_values');
        }

        /** @type {import('./configs.js').TransformersJSConfig} */
        this.custom_config = this.config['transformers.js_config'] ?? {};
    }

    /**
     * Disposes of all the ONNX sessions that were created during inference.
     * @returns {Promise<unknown[]>} An array of promises, one for each ONNX session that is being disposed.
     * @todo Use https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry
     */
    async dispose() {
        const promises = [];
        for (const session of Object.values(this.sessions)) {
            if (session?.handler?.dispose) {
                promises.push(session.handler.dispose())
            }
        }
        return await Promise.all(promises);
    }

    /**
     * Instantiate one of the model classes of the library from a pretrained model.
     *
     * The model class to instantiate is selected based on the `model_type` property of the config object
     * (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)
     *
     * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either:
     * - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
     *   Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
     *   user or organization name, like `dbmdz/bert-base-german-cased`.
     * - A path to a *directory* containing model weights, e.g., `./my_model_directory/`.
     * @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the model.
     *
     * @returns {Promise<PreTrainedModel>} A new instance of the `PreTrainedModel` class.
     */
    static async from_pretrained(pretrained_model_name_or_path, {
        progress_callback = null,
        config = null,
        cache_dir = null,
        local_files_only = false,
        revision = 'main',
        model_file_name = null,
        subfolder = 'onnx',
        device = null,
        dtype = null,
        use_external_data_format = null,
        session_options = {},
    } = {}) {
        let options = {
            progress_callback,
            config,
            cache_dir,
            local_files_only,
            revision,
            model_file_name,
            subfolder,
            device,
            dtype,
            use_external_data_format,
            session_options,
        }

        const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this);
        const modelType = MODEL_TYPE_MAPPING.get(modelName);

        config = options.config = await AutoConfig.from_pretrained(pretrained_model_name_or_path, options);

        // Each architecture family ships a different set of ONNX session files;
        // construct the sessions (and optional generation config) accordingly.
        let info;
        if (modelType === MODEL_TYPES.DecoderOnly) {
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    model: options.model_file_name ?? 'model',
                }, options),
                getOptionalConfigs(pretrained_model_name_or_path, {
                    generation_config: 'generation_config.json',
                }, options),
            ]);

        } else if (modelType === MODEL_TYPES.Seq2Seq || modelType === MODEL_TYPES.Vision2Seq) {
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    model: 'encoder_model',
                    decoder_model_merged: 'decoder_model_merged',
                }, options),
                getOptionalConfigs(pretrained_model_name_or_path, {
                    generation_config: 'generation_config.json',
                }, options),
            ]);

        } else if (modelType === MODEL_TYPES.MaskGeneration) {
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    model: 'vision_encoder',
                    prompt_encoder_mask_decoder: 'prompt_encoder_mask_decoder',
                }, options),
            ]);

        } else if (modelType === MODEL_TYPES.EncoderDecoder) {
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    model: 'encoder_model',
                    decoder_model_merged: 'decoder_model_merged',
                }, options),
            ]);

        } else if (modelType === MODEL_TYPES.ImageTextToText) {
            const sessions = {
                embed_tokens: 'embed_tokens',
                vision_encoder: 'vision_encoder',
                decoder_model_merged: 'decoder_model_merged',
            }
            if (config.is_encoder_decoder) {
                sessions['model'] = 'encoder_model';
            }
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, sessions, options),
                getOptionalConfigs(pretrained_model_name_or_path, {
                    generation_config: 'generation_config.json',
                }, options),
            ]);
        } else if (modelType === MODEL_TYPES.AudioTextToText) {
            const sessions = {
                embed_tokens: 'embed_tokens',
                audio_encoder: 'audio_encoder',
                decoder_model_merged: 'decoder_model_merged',
            }
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, sessions, options),
                getOptionalConfigs(pretrained_model_name_or_path, {
                    generation_config: 'generation_config.json',
                }, options),
            ]);
        } else if (modelType === MODEL_TYPES.ImageAudioTextToText) {
            const sessions = {
                embed_tokens: 'embed_tokens',
                audio_encoder: 'audio_encoder',
                vision_encoder: 'vision_encoder',
                decoder_model_merged: 'decoder_model_merged',
            }
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, sessions, options),
                getOptionalConfigs(pretrained_model_name_or_path, {
                    generation_config: 'generation_config.json',
                }, options),
            ]);
        } else if (modelType === MODEL_TYPES.Musicgen) {
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    model: 'text_encoder',
                    decoder_model_merged: 'decoder_model_merged',
                    encodec_decode: 'encodec_decode',
                }, options),
                getOptionalConfigs(pretrained_model_name_or_path, {
                    generation_config: 'generation_config.json',
                }, options),
            ]);

        } else if (modelType === MODEL_TYPES.MultiModality) {
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    prepare_inputs_embeds: 'prepare_inputs_embeds',
                    model: 'language_model',
                    lm_head: 'lm_head',
                    gen_head: 'gen_head',
                    gen_img_embeds: 'gen_img_embeds',
                    image_decode: 'image_decode',
                }, options),
                getOptionalConfigs(pretrained_model_name_or_path, {
                    generation_config: 'generation_config.json',
                }, options),
            ]);

        } else if (modelType === MODEL_TYPES.Phi3V) {
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    prepare_inputs_embeds: 'prepare_inputs_embeds',
                    model: 'model',
                    vision_encoder: 'vision_encoder',
                }, options),
                getOptionalConfigs(pretrained_model_name_or_path, {
                    generation_config: 'generation_config.json',
                }, options),
            ]);
        } else if (modelType === MODEL_TYPES.AutoEncoder) {
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    encoder_model: 'encoder_model',
                    decoder_model: 'decoder_model',
                }, options),
            ]);
        } else { // should be MODEL_TYPES.EncoderOnly
            if (modelType !== MODEL_TYPES.EncoderOnly) {
                const type = modelName ?? config?.model_type;
                if (type !== 'custom') {
                    console.warn(`Model type for '${type}' not found, assuming encoder-only architecture. Please report this at ${GITHUB_ISSUE_URL}.`)
                }
            }
            info = await Promise.all([
                constructSessions(pretrained_model_name_or_path, {
                    model: options.model_file_name ?? 'model',
                }, options),
            ]);
        }

        // @ts-ignore
        return new this(config, ...info);
    }

    /**
     * Runs the model with the provided inputs
     * @param {Object} model_inputs Object containing input tensors
     * @returns {Promise<Object>} Object containing output tensors
     */
    async _call(model_inputs) {
        return await this.forward(model_inputs);
    }

    /**
     * Forward method for a pretrained model. If not overridden by a subclass, the correct forward method
     * will be chosen based on the model type.
     * @param {Object} model_inputs The input data to the model in the format specified in the ONNX model.
     * @returns {Promise<Object>} The output data from the model in the format specified in the ONNX model.
     * @throws {Error} This method must be implemented in subclasses.
     */
    async forward(model_inputs) {
        return await this._forward(this, model_inputs);
    }

    /**
     * Get the model's generation config, if it exists.
     * @returns {GenerationConfig|null} The model's generation config if it exists, otherwise `null`.
     */
    get generation_config() {
        return this.configs?.generation_config ??
null;
    }

    /**
     * This function returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`]
     * instances used for multinomial sampling.
     * @param {GenerationConfig} generation_config The generation config.
     * @returns {LogitsProcessorList} generation_config
     */
    _get_logits_warper(generation_config) {

        // instantiate warpers list
        const warpers = new LogitsProcessorList();

        if (generation_config.temperature !== null && generation_config.temperature !== 1.0) {
            warpers.push(new TemperatureLogitsWarper(generation_config.temperature));
        }
        if (generation_config.top_k !== null && generation_config.top_k !== 0) {
            // TODO: add min_tokens_to_keep
            warpers.push(new TopKLogitsWarper(generation_config.top_k));
        }
        if (generation_config.top_p !== null && generation_config.top_p < 1.0) {
            // TODO: add min_tokens_to_keep
            warpers.push(new TopPLogitsWarper(generation_config.top_p));
        }

        return warpers;
    }

    /**
     * Builds the list of logits processors implied by the generation config
     * (repetition penalties, length constraints, forced tokens, etc.).
     * Commented-out blocks mirror processors not yet ported from transformers.
     * @param {GenerationConfig} generation_config
     * @param {number} input_ids_seq_length The starting sequence length for the input ids.
     * @returns {LogitsProcessorList}
     * @private
     */
    _get_logits_processor(
        generation_config,
        input_ids_seq_length,
        // encoder_input_ids, TODO
        // prefix_allowed_tokens_fn, TODO
        logits_processor = null
    ) {
        const processors = new LogitsProcessorList();

        // if (generation_config.diversity_penalty !== null && generation_config.diversity_penalty > 0.0) {
        //     processors.push(new HammingDiversityLogitsProcessor(
        //         generation_config.diversity_penalty,
        //         generation_config.num_beams,
        //         generation_config.num_beam_groups
        //     ));
        // }

        // if (generation_config.encoder_repetition_penalty !== null && generation_config.encoder_repetition_penalty !== 1.0) {
        //     processors.push(new EncoderRepetitionPenaltyLogitsProcessor(
        //         generation_config.encoder_repetition_penalty,
        //         encoder_input_ids
        //     ));
        // }

        if (generation_config.repetition_penalty !== null && generation_config.repetition_penalty !== 1.0) {
            processors.push(new RepetitionPenaltyLogitsProcessor(generation_config.repetition_penalty));
        }

        if (generation_config.no_repeat_ngram_size !== null && generation_config.no_repeat_ngram_size > 0) {
            processors.push(new NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size));
        }

        // if (generation_config.encoder_no_repeat_ngram_size !== null && generation_config.encoder_no_repeat_ngram_size > 0) {
        //     if (this.config.is_encoder_decoder) {
        //         processors.push(new EncoderNoRepeatNGramLogitsProcessor(
        //             generation_config.encoder_no_repeat_ngram_size,
        //             encoder_input_ids
        //         ));
        //     } else {
        //         throw new Error("It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture");
        //     }
        // }

        if (generation_config.bad_words_ids !== null) {
            processors.push(new NoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id));
        }

        if (generation_config.min_length !== null && generation_config.eos_token_id !== null && generation_config.min_length > 0) {
            processors.push(new MinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id));
        }

        if (generation_config.min_new_tokens !== null && generation_config.eos_token_id !== null && generation_config.min_new_tokens > 0) {
            processors.push(new MinNewTokensLengthLogitsProcessor(
                input_ids_seq_length,
                generation_config.min_new_tokens,
                generation_config.eos_token_id
            ));
        }

        // if (prefix_allowed_tokens_fn !== null) {
        //     processors.push(new PrefixConstrainedLogitsProcessor(
        //         prefix_allowed_tokens_fn,
        //         generation_config.num_beams / generation_config.num_beam_groups
        //     ));
        // }

        if (generation_config.forced_bos_token_id !== null) {
            processors.push(new ForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id));
        }

        if (generation_config.forced_eos_token_id !== null) {
            processors.push(new ForcedEOSTokenLogitsProcessor(
                generation_config.max_length,
                generation_config.forced_eos_token_id
            ));
        }

        // if (generation_config.remove_invalid_values === true) {
        //     processors.push(new InfNanRemoveLogitsProcessor());
        // }

        // if (generation_config.exponential_decay_length_penalty !== null) {
        //     processors.push(new ExponentialDecayLengthPenalty(
        //         generation_config.exponential_decay_length_penalty,
        //         generation_config.eos_token_id,
        //         input_ids_seq_length
        //     ));
        // }

        // if (generation_config.suppress_tokens !== null) {
        //     processors.push(new SuppressTokensLogitsProcessor(generation_config.suppress_tokens));
        // }

        if (generation_config.begin_suppress_tokens !== null) {
            // Suppression starts after the prompt, shifted by one when a BOS token is forced.
            const begin_index = (input_ids_seq_length > 1 || generation_config.forced_bos_token_id === null)
                ? input_ids_seq_length
                : input_ids_seq_length + 1;
            processors.push(new SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index));
        }

        // DEPRECATED: https://github.com/huggingface/transformers/pull/29485
        // if (generation_config.forced_decoder_ids !== null) {
        //     processors.push(new ForceTokensLogitsProcessor(generation_config.forced_decoder_ids));
        // }

        // 8. prepare batched CFG externally
// prepare batched CFG externally
        if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) {
            processors.push(new ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale));
        }

        if (logits_processor !== null) {
            processors.extend(logits_processor)
        }

        // `LogitNormalization` should always be the last logit processor, when present
        // if (generation_config.renormalize_logits === true) {
        //     processors.push(new LogitNormalization());
        // }

        return processors;
    }

    /**
     * This function merges multiple generation configs together to form a final generation config to be used by the model for text generation.
     * It first creates an empty `GenerationConfig` object, then it applies the model's own `generation_config` property to it. Finally, if a `generation_config` object was passed in the arguments, it overwrites the corresponding properties in the final config with those of the passed config object.
     * @param {GenerationConfig|null} generation_config A `GenerationConfig` object containing generation parameters.
     * @param {Object} kwargs Additional generation parameters to be used in place of those in the `generation_config` object.
     * @returns {GenerationConfig} The final generation config object to be used by the model for text generation.
     */
    _prepare_generation_config(generation_config, kwargs, cls = GenerationConfig) {
        // Precedence (lowest to highest): model config defaults, model's own
        // generation_config, the `generation_config` argument, then loose kwargs.

        // Create empty generation config (contains defaults)
        // We pass `this.config` so that if `eos_token_id` or `bos_token_id` exist in the model's config, we will use them
        const config = { ...this.config };
        for (const key of ["decoder", "generator", "text_config"]) {
            // Special case: some models have generation attributes set in the decoder.
            // Use them if still unset in the generation config.
            if (key in config) {
                Object.assign(config, config[key]);
            }
        }

        const gen_config = new cls(config);

        // Apply model's generation config, if it exists
        Object.assign(gen_config, this.generation_config ?? {});

        // Next, use any generation config specified by the user
        // when calling `generate`
        if (generation_config) {
            Object.assign(gen_config, generation_config);
        }

        // Finally, if any kwargs were passed, use them to overwrite
        if (kwargs) {
            // Only known generation-config keys are copied over from kwargs.
            Object.assign(gen_config, pick(kwargs, Object.getOwnPropertyNames(gen_config)));
        }

        return gen_config;
    }

    /**
     * Builds the stopping criteria (max length / EOS) implied by the generation config.
     * @param {GenerationConfig} generation_config
     * @param {StoppingCriteriaList} [stopping_criteria=null]
     */
    _get_stopping_criteria(generation_config, stopping_criteria = null) {
        const criteria = new StoppingCriteriaList();

        if (generation_config.max_length !== null) {
            criteria.push(new MaxLengthCriteria(
                generation_config.max_length,
                this.config.max_position_embeddings ?? null,
            ));
        }
        // if (generation_config.max_time !== null) {
        //     criteria.push(new MaxTimeCriteria(generation_config.max_time));
        // }
        if (generation_config.eos_token_id !== null) {
            criteria.push(new EosTokenCriteria(generation_config.eos_token_id));
        }

        if (stopping_criteria) {
            criteria.extend(stopping_criteria);
        }

        return criteria;
    }

    /**
     * Confirms that the model class is compatible with generation.
     * If not, raises an exception that points to the right class to use.
     */
    _validate_model_class() {
        if (!this.can_generate) {
            const generate_compatible_mappings = [
                MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
                // MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, // TODO
                MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES,
                MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
                MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES,
            ];

            const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this.constructor);

            // Look up which generation-capable class(es) support this model_type,
            // so the error message can point the user at the right one.
            const generate_compatible_classes = new Set();
            const modelType = this.config.model_type;
            for (const model_mapping of generate_compatible_mappings) {
                const supported_models = model_mapping.get(modelType);
                if (supported_models) {
                    generate_compatible_classes.add(supported_models[0]);
                }
            }

            let errorMessage = `The current model class (${modelName}) is not compatible with \`.generate()\`, as it doesn't have a language model head.`
            if (generate_compatible_classes.size > 0) {
                errorMessage += ` Please use the following class instead: ${[...generate_compatible_classes].join(', ')}`;
            }
            throw Error(errorMessage);
        }
    }

    // Delegates to the strategy chosen in the constructor based on model type.
    prepare_inputs_for_generation(...args) {
        return this._prepare_inputs_for_generation(this, ...args);
    }

    /**
     * Updates model inputs after one generation step (cache, input_ids, attention mask).
     * @param {Object} inputs
     * @param {bigint[][]} inputs.generated_input_ids
     * @param {Object} inputs.outputs
     * @param {Object} inputs.model_inputs
     * @param {boolean} inputs.is_encoder_decoder
     * @returns {Object} The updated model inputs for the next generation iteration.
*/ _update_model_kwargs_for_generation({ generated_input_ids, outputs, model_inputs, is_encoder_decoder }) { // update past_key_values model_inputs['past_key_values'] = this.getPastKeyValues(outputs, model_inputs.past_key_values); // update inputs for next run model_inputs['input_ids'] = new Tensor('int64', generated_input_ids.flat(), [generated_input_ids.length, 1]); if (!is_encoder_decoder) { // update attention mask model_inputs.attention_mask = cat( [ model_inputs.attention_mask, ones([model_inputs.attention_mask.dims[0], 1]), ], 1 ); } else if ('decoder_attention_mask' in model_inputs) { // TODO: update decoder attention mask if the model requires it } // force recreate position_ids in next iteration model_inputs['position_ids'] = null; return model_inputs; } /** * This function extracts the model-specific `inputs` for generation. * @param {Object} params * @param {Tensor} [params.inputs=null] * @param {number} [params.bos_token_id=null] * @param {Record<string, Tensor|number[]>} [params.model_kwargs] * @returns {{inputs_tensor: Tensor, model_inputs: Record<string, Tensor>, model_input_name: string}} The model-specific inputs for generation. */ _prepare_model_inputs({ inputs, bos_token_id, model_kwargs }) { const model_inputs = pick(model_kwargs, this.forward_params); const input_name = this.main_input_name; if (input_name in model_inputs) { if (inputs) { throw new Error( "`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. " + "Make sure to either pass {inputs} or {input_name}=..." 
); } } else { model_inputs[input_name] = inputs; } const inputs_tensor = model_inputs[input_name]; return { inputs_tensor, model_inputs, model_input_name: input_name }; } async _prepare_encoder_decoder_kwargs_for_generation({ inputs_tensor, model_inputs, model_input_name, generation_config }) { if ( this.sessions['model'].inputNames.includes('inputs_embeds') && !model_inputs.inputs_embeds && '_prepare_inputs_embeds' in this ) { // Encoder expects `inputs_embeds` instead of `input_ids` const { input_ids, pixel_values, attention_mask, ...kwargs } = model_inputs; // @ts-ignore const prepared_inputs = await this._prepare_inputs_embeds(model_inputs); model_inputs = { ...kwargs, ...pick(prepared_inputs, ['inputs_embeds', 'attention_mask']), }; } let { last_hidden_state } = await encoderForward(this, model_inputs); // for classifier free guidance we need to add a 'null' input to our encoder hidden states if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) { last_hidden_state = cat([ last_hidden_state, full_like(last_hidden_state, 0.0), ], 0); if ('attention_mask' in model_inputs) { model_inputs['attention_mask'] = cat([ model_inputs['attention_mask'], zeros_like(model_inputs['attention_mask']), ], 0); } } else if (model_inputs.decoder_input_ids) { // Ensure that the encoder outputs have the same batch size as the decoder inputs, // allowing for more efficient batched generation for single inputs const decoder_input_ids_batch_size = toI64Tensor(model_inputs.decoder_input_ids).dims[0]; if (decoder_input_ids_batch_size !== last_hidden_state.dims[0]) { if (last_hidden_state.dims[0] !== 1) { throw new Error( `The encoder outputs have a different batch size (${last_hidden_state.dims[0]}) than the decoder inputs (${decoder_input_ids_batch_size}).` ) } last_hidden_state = cat(Array.from({ length: decoder_input_ids_batch_size }, () => last_hidden_state), 0); } } model_inputs['encoder_outputs'] = last_hidden_state; return model_inputs; } /** * 
Prepares `decoder_input_ids` for generation with encoder-decoder models * @param {*} param0 */ _prepare_decoder_input_ids_for_generation({ batch_size, model_input_name, model_kwargs, decoder_start_token_id, bos_token_id, generation_config }) { let { decoder_input_ids, ...model_inputs } = model_kwargs; // Prepare input ids if the user has not defined `decoder_input_ids` manually. if (!(decoder_input_ids instanceof Tensor)) { if (!decoder_input_ids) { decoder_start_token_id ??= bos_token_id; if (this.config.model_type === 'musicgen') { // Custom logic (TODO: move to Musicgen class) decoder_input_ids = Array.from({ // @ts-expect-error TS2339 length: batch_size * this.config.decoder.num_codebooks }, () => [decoder_start_token_id]); } else if (Array.isArray(decoder_start_token_id)) { if (decoder_start_token_id.length !== batch_size) { throw new Error( `\`decoder_start_token_id\` expcted to have length ${batch_size} but got ${decoder_start_token_id.length}` ) } decoder_input_ids = decoder_start_token_id; } else { decoder_input_ids = Array.from({ length: batch_size, }, () => [decoder_start_token_id]); } } else if (!Array.isArray(decoder_input_ids[0])) { // Correct batch size decoder_input_ids = Array.from({ length: batch_size, }, () => decoder_input_ids); } decoder_input_ids = toI64Tensor(decoder_input_ids); } model_kwargs['decoder_attention_mask'] = ones_like(decoder_input_ids); return { input_ids: decoder_input_ids, model_inputs }; } /** * Generates sequences of token ids for models with a language modeling head. * @param {import('./generation/parameters.js').GenerationFunctionParameters} options * @returns {Promise<ModelOutput|Tensor>} The output of the model, which can contain the generated token ids, attentions, and scores. 
*/
    async generate({
        inputs = null,
        generation_config = null,
        logits_processor = null,
        stopping_criteria = null,
        streamer = null,

        // inputs_attention_mask = null,
        ...kwargs
    }) {
        // Abort early if this class does not support generation.
        this._validate_model_class();

        // Update generation config with defaults and kwargs
        generation_config = this._prepare_generation_config(generation_config, kwargs);

        // 3. Define model inputs
        let { inputs_tensor, model_inputs, model_input_name } = this._prepare_model_inputs({
            inputs,
            model_kwargs: kwargs,
        });

        const is_encoder_decoder = this.config.is_encoder_decoder;

        // 4. Define other model kwargs
        if (!is_encoder_decoder) {
            // decoder-only models should use left-padding for generation
        } else if (!('encoder_outputs' in model_inputs)) {
            // if model is encoder decoder encoder_outputs are created
            // and added to `model_kwargs` (the encoder runs exactly once).
            model_inputs = await this._prepare_encoder_decoder_kwargs_for_generation(
                { inputs_tensor, model_inputs, model_input_name, generation_config }
            )
        }

        // 5. Prepare `input_ids` which will be used for auto-regressive generation
        // TODO: Update to align with HF transformers' implementation
        let input_ids;
        if (is_encoder_decoder) {
            // Generating from the encoder outputs
            ({ input_ids, model_inputs } = this._prepare_decoder_input_ids_for_generation({
                batch_size: model_inputs[model_input_name].dims.at(0),
                model_input_name,
                model_kwargs: model_inputs,
                decoder_start_token_id: generation_config.decoder_start_token_id,
                bos_token_id: generation_config.bos_token_id,
                generation_config,
            }));
        } else {
            input_ids = model_inputs[model_input_name]
        }

        // 6. Prepare `max_length` depending on other stopping criteria.
        let input_ids_length = input_ids.dims.at(-1);

        // `max_new_tokens` takes precedence: total length = prompt length + new tokens.
        if (generation_config.max_new_tokens !== null) {
            generation_config.max_length = input_ids_length + generation_config.max_new_tokens;
        }

        // input_ids_length = model_inputs[model_input_name].dims.at(1);
        // // inputs instanceof Tensor ?  :  inputs.length;

        // // decoder-only
        // if (input_ids_length === 0) {
        //     throw Error("Must supply a non-empty array of input token ids.")
        // }

        // let decoder_input_ids =
        //     generation_config.decoder_input_ids
        //     ?? generation_config.decoder_start_token_id
        //     ?? generation_config.bos_token_id
        //     ?? generation_config.eos_token_id;

        // Update logits processor
        // 8. prepare distribution pre_processing samplers
        const prepared_logits_processor = this._get_logits_processor(
            generation_config,
            input_ids_length,
            logits_processor,
        )

        // 9. prepare stopping criteria
        const prepared_stopping_criteria = this._get_stopping_criteria(
            generation_config, stopping_criteria
        )

        // /** @type {number[]} */
        // let eos_token_ids = generation_config.eos_token_id;
        // if (eos_token_ids !== null && !Array.isArray(eos_token_ids)) {
        //     eos_token_ids = [eos_token_ids];
        // }

        const numInputs = model_inputs[model_input_name].dims.at(0);

        // TODO:
        // done is a list of booleans to keep track of which inputs are done
        // const done = new Array(numInputs).fill(false);
        // For efficiency purposes, we remove completed rows from model_inputs
        // when the beam is complete, and we keep track of the row index
        // const rowIndexToBatchIndex = new Map();

        const sampler = LogitsSampler.getSampler(generation_config);

        // TODO make > numInputs
        // Cumulative log-probability per batch row.
        const scores = new Array(numInputs).fill(0);
        /** @type {bigint[][]} */
        const all_input_ids = input_ids.tolist();
        if (streamer) {
            // Streamer first receives the prompt, then one token per row per step below.
            streamer.put(all_input_ids);
        }
        // const all_generated_input_ids = Array.from({ length: numInputs }, () => []);

        // NOTE: For now, we don't support spawning new beams
        // TODO: when we do, we simply copy past key values and accumulate into single large tensor

        ////////////////////////////////////////////////////
        // Generic search which handles 4 generation modes:
        // - GenerationMode.GREEDY_SEARCH
        // - GenerationMode.SAMPLE
        // - GenerationMode.BEAM_SEARCH
        // - GenerationMode.BEAM_SAMPLE
        ////////////////////////////////////////////////////
        let outputs;
        let attentions = {};
        // Auto-regressive loop: one forward pass per generated token, until the
        // stopping criteria fire for every batch row.
        while (true) {
            // prepare model inputs
            model_inputs = this.prepare_inputs_for_generation(all_input_ids, model_inputs, generation_config);
            outputs = await this.forward(model_inputs);

            if (generation_config.output_attentions && generation_config.return_dict_in_generate) {
                // Get attentions if they are present
                const token_attentions = this.getAttentions(outputs);
                for (const key in token_attentions) {
                    if (!(key in attentions)) {
                        attentions[key] = [];
                    }
                    attentions[key].push(token_attentions[key]);
                }
            }

            // Logits are of the form [batch_size, out_seq_length, vocab_size]
            // In most cases, this will be [batch_size, 1, vocab_size]
            // So, we select the last token's logits:
            // (equivalent to `logits = outputs.logits[:, -1, :]`)
            const logits = outputs.logits.slice(null, -1, null);

            const next_tokens_scores = prepared_logits_processor(all_input_ids, logits);

            /** @type {[bigint][]} */
            const generated_input_ids = [];
            // const new_kv_cache = [];// NOTE: Only used for beam search when concatenating new kv
            // Loop over each batch
            for (let batch_idx = 0; batch_idx < next_tokens_scores.dims.at(0); ++batch_idx) {
                const logs = next_tokens_scores[batch_idx];

                const sampledTokens = await sampler(logs);
                for (const [newTokenId, logProb] of sampledTokens) {
                    const bigint = BigInt(newTokenId);
                    // TODO: If branching, use previous beam as a starting point
                    // update generated ids, model inputs, and length for next step
                    scores[batch_idx] += logProb;
                    all_input_ids[batch_idx].push(bigint);
                    generated_input_ids.push([bigint]);

                    // TODO: Support beam search
                    // Only the first sampled token is used (single-beam behaviour).
                    break;
                }
            }
            if (streamer) {
                streamer.put(generated_input_ids);
            }

            const stop = prepared_stopping_criteria(all_input_ids);
            if (stop.every(x => x)) {
                break;
            }

            // Carry the new tokens (and KV cache) forward to the next step.
            model_inputs = this._update_model_kwargs_for_generation({
                generated_input_ids,
                outputs,
                model_inputs,
                is_encoder_decoder,
            });
        }

        if (streamer) {
            streamer.end();
        }

        // Retrieve and dispose all final past key values (including encoder attentions)
        const past_key_values = this.getPastKeyValues(outputs, model_inputs.past_key_values, true);

        // TODO: ensure all_input_ids is padded correctly...
        const sequences = new Tensor('int64', all_input_ids.flat(), [all_input_ids.length, all_input_ids[0].length]);

        if (generation_config.return_dict_in_generate) {
            return {
                sequences,
                past_key_values,
                ...attentions,
                // TODO:
                // scores,
                // logits,
            }
        } else {
            // Dispose all remaining tensors
            for (const tensor of Object.values(outputs)) {
                if (tensor.location === 'gpu-buffer') {
                    tensor.dispose();
                }
            }
            return sequences;
        }
    }

    /**
     * Returns an object containing past key values from the given decoder results object.
     *
     * @param {Object} decoderResults The decoder results object.
     * @param {Object} pastKeyValues The previous past key values.
     * @param {boolean} [disposeEncoderPKVs=false] Whether to also dispose encoder past key values (used after generation finishes).
     * @returns {Object} An object containing past key values.
     */
    getPastKeyValues(decoderResults, pastKeyValues, disposeEncoderPKVs = false) {
        const pkvs = Object.create(null);
        for (const name in decoderResults) {
            if (name.startsWith('present')) {
                // Rename `present*` outputs to the `past*` names expected as next-step inputs.
                const newName = name
                    .replace('present_conv', 'past_conv') // Hybrid cache architecture (e.g., LFM2)
                    .replace('present', 'past_key_values');
                const is_encoder_pkv = name.includes('encoder');
                if (is_encoder_pkv && pastKeyValues) {
                    // Optimization introduced by optimum to reuse past key values.
                    // So, we just replace the constant outputs (`decoderResults[name]`) with the previous past key values.
// https://github.com/huggingface/optimum/blob/0bf2c05fb7e1182b52d21b703cfc95fd9e4ea3dc/optimum/onnxruntime/base.py#L677-L704 pkvs[newName] = pastKeyValues[newName]; } else { // decoder or using first encoder PKVs pkvs[newName] = decoderResults[name]; } if (pastKeyValues && (!is_encoder_pkv || disposeEncoderPKVs)) { // - Always dispose decoder PKVs // - Only dispose encoder past key values when requested (after generation) const t = pastKeyValues[newName]; if (t.location === 'gpu-buffer') { t.dispose(); } } } } return pkvs; } /** * Returns an object containing attentions from the given model output object. * * @param {Object} model_output The output of the model. * @returns {{cross_attentions?: Tensor[]}} An object containing attentions. */ getAttentions(model_output) { const attentions = {}; for (const attnName of ['cross_attentions', 'encoder_attentions', 'decoder_attentions']) { for (const name in model_output) { if (name.startsWith(attnName)) { if (!(attnName in attentions)) { attentions[attnName] = []; } attentions[attnName].push(model_output[name]); } } } return attentions; } /** * Adds past key values to the decoder feeds object. If pastKeyValues is null, creates new tensors for past key values. * * @param {Object} decoderFeeds The decoder feeds object to add past key values to. * @param {Object} pastKeyValues An object containing past key values. */ addPastKeyValues(decoderFeeds, pastKeyValues) { if (pastKeyValues) { Object.assign(decoderFeeds, pastKeyValues) } else { const session = this.sessions['decoder_model_merged'] ?? this.sessions['model']; const batch_size = (decoderFeeds[this.main_input_name] ?? decoderFeeds.attention_mask)?.dims?.[0] ?? 1; const dtype = session?.config?.kv_cache_dtype ?? 'float32'; const cls = (dtype === 'float16') ? 
DataTypeMap.float16 : DataTypeMap.float32;
            const shapes = getCacheShapes(this.config, { batch_size });
            for (const name in shapes) {
                // Allocate a zero-filled typed array of the full tensor size.
                const size = shapes[name].reduce((a, b) => a * b, 1);
                decoderFeeds[name] = new Tensor(dtype, new cls(size), shapes[name]);
            }
        }
    }

    /**
     * Encodes images into feature vectors using the model's vision encoder session.
     * @param {{pixel_values: Tensor}} param0 The image inputs.
     * @returns {Promise<Tensor>} The image features.
     */
    async encode_image({ pixel_values }) {
        // image_inputs === { pixel_values }
        return (await sessionRun(this.sessions['vision_encoder'], { pixel_values })).image_features;
    }

    /**
     * Embeds input token ids using the model's token-embedding session.
     * @param {{input_ids: Tensor}} param0 The text inputs.
     * @returns {Promise<Tensor>} The input embeddings.
     */
    async encode_text({ input_ids }) {
        // text_inputs === { input_ids, attention_mask }
        return (await sessionRun(this.sessions['embed_tokens'], { input_ids })).inputs_embeds;
    }

    /**
     * Encodes audio into feature vectors using the model's audio encoder session.
     * @param {{audio_values: Tensor}} param0 The audio inputs.
     * @returns {Promise<Tensor>} The audio features.
     */
    async encode_audio({ audio_values }) {
        // audio_inputs === { audio_values }
        return (await sessionRun(this.sessions['audio_encoder'], { audio_values })).audio_features;
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Base model output class
export class ModelOutput { }

/**
 * Base class for model's outputs, with potential hidden states and attentions.
 */
export class BaseModelOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.last_hidden_state Sequence of hidden-states at the output of the last layer of the model.
     * @param {Tensor} [output.hidden_states] Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
     * @param {Tensor} [output.attentions] Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
     */
    constructor({ last_hidden_state, hidden_states = null, attentions = null }) {
        super();
        this.last_hidden_state = last_hidden_state;
        this.hidden_states = hidden_states;
        this.attentions = attentions;
    }
}
//////////////////////////////////////////////////
// Bert models
export class BertPreTrainedModel extends PreTrainedModel { }
export class BertModel extends BertPreTrainedModel { }

/**
 * BertForMaskedLM is a class representing a BERT model for masked language modeling.
 */
export class BertForMaskedLM extends BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * BertForSequenceClassification is a class representing a BERT model for sequence classification.
 */
export class BertForSequenceClassification extends BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * BertForTokenClassification is a class representing a BERT model for token classification.
 */
export class BertForTokenClassification extends BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * BertForQuestionAnswering is a class representing a BERT model for question answering.
 */
export class BertForQuestionAnswering extends BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// NeoBert models
export class NeoBertPreTrainedModel extends PreTrainedModel { }
export class NeoBertModel extends NeoBertPreTrainedModel { }

export class NeoBertForMaskedLM extends NeoBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

export class NeoBertForSequenceClassification extends NeoBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

export class NeoBertForTokenClassification extends NeoBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

export class NeoBertForQuestionAnswering extends NeoBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// ModernBert models
export class ModernBertPreTrainedModel extends PreTrainedModel { }
export class ModernBertModel extends ModernBertPreTrainedModel { }

export class ModernBertForMaskedLM extends ModernBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

export class ModernBertForSequenceClassification extends ModernBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

export class ModernBertForTokenClassification extends ModernBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// ModernBERT Decoder models
export class ModernBertDecoderPreTrainedModel extends PreTrainedModel { }
export class ModernBertDecoderModel extends ModernBertDecoderPreTrainedModel { }
export class ModernBertDecoderForCausalLM extends ModernBertDecoderPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// NomicBert models
export class NomicBertPreTrainedModel extends PreTrainedModel { }
export class NomicBertModel extends NomicBertPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// RoFormer models
export class RoFormerPreTrainedModel extends PreTrainedModel { }

/**
 * The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.
 */
export class RoFormerModel extends RoFormerPreTrainedModel { }

/**
 * RoFormer Model with a `language modeling` head on top.
 */
export class RoFormerForMaskedLM extends RoFormerPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
 */
export class RoFormerForSequenceClassification extends RoFormerPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output)
 * e.g. for Named-Entity-Recognition (NER) tasks.
 */
export class RoFormerForTokenClassification extends RoFormerPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD
 * (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
 */
export class RoFormerForQuestionAnswering extends RoFormerPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
// TODO: Add RoFormerForCausalLM and RoFormerForMultipleChoice
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// ConvBert models
export class ConvBertPreTrainedModel extends PreTrainedModel { }

/**
 * The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.
 */
export class ConvBertModel extends ConvBertPreTrainedModel { }

/**
 * ConvBERT Model with a language modeling head on top.
 */
export class ConvBertForMaskedLM extends ConvBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
 */
export class ConvBertForSequenceClassification extends ConvBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output)
 * e.g. for Named-Entity-Recognition (NER) tasks.
 */
export class ConvBertForTokenClassification extends ConvBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD
 * (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`)
 */
export class ConvBertForQuestionAnswering extends ConvBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Electra models
export class ElectraPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Electra Model transformer outputting raw hidden-states without any specific head on top.
 * Identical to the BERT model except that it uses an additional linear layer between the embedding
 * layer and the encoder if the hidden size and embedding size are different.
 */
export class ElectraModel extends ElectraPreTrainedModel { }
// TODO add ElectraForPreTraining

/**
 * Electra model with a language modeling head on top.
 */
export class ElectraForMaskedLM extends ElectraPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
 */
export class ElectraForSequenceClassification extends ElectraPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * Electra model with a token classification head on top.
 */
export class ElectraForTokenClassification extends ElectraPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD
 * (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
 */
export class ElectraForQuestionAnswering extends ElectraPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// CamemBERT models
export class CamembertPreTrainedModel extends PreTrainedModel { }

/**
 * The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.
 */
export class CamembertModel extends CamembertPreTrainedModel { }

/**
 * CamemBERT Model with a `language modeling` head on top.
 */
export class CamembertForMaskedLM extends CamembertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
 */
export class CamembertForSequenceClassification extends CamembertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
 */
export class CamembertForTokenClassification extends CamembertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * CamemBERT Model with a span classification head on top for extractive question-answering tasks
 */
export class CamembertForQuestionAnswering extends CamembertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// DeBERTa models
export class DebertaPreTrainedModel extends PreTrainedModel { }

/**
 * The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.
 */
export class DebertaModel extends DebertaPreTrainedModel { }

/**
 * DeBERTa Model with a `language modeling` head on top.
 */
export class DebertaForMaskedLM extends DebertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
 */
export class DebertaForSequenceClassification extends DebertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
 */
export class DebertaForTokenClassification extends DebertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
 * layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
 */
export class DebertaForQuestionAnswering extends DebertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// DeBERTa-v2 models
export class DebertaV2PreTrainedModel extends PreTrainedModel { }

/**
 * The bare DeBERTa-V2 Model transformer outputting raw hidden-states without any specific head on top.
 */
export class DebertaV2Model extends DebertaV2PreTrainedModel { }

/**
 * DeBERTa-V2 Model with a `language modeling` head on top.
 */
export class DebertaV2ForMaskedLM extends DebertaV2PreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * DeBERTa-V2 Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
 */
export class DebertaV2ForSequenceClassification extends DebertaV2PreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * DeBERTa-V2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
 */
export class DebertaV2ForTokenClassification extends DebertaV2PreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * DeBERTa-V2 Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
 * layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
 */
export class DebertaV2ForQuestionAnswering extends DebertaV2PreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// DistilBert models
export class DistilBertPreTrainedModel extends PreTrainedModel { }
export class DistilBertModel extends DistilBertPreTrainedModel { }

/**
 * DistilBertForSequenceClassification is a class representing a DistilBERT model for sequence classification.
 */
export class DistilBertForSequenceClassification extends DistilBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * DistilBertForTokenClassification is a class representing a DistilBERT model for token classification.
 */
export class DistilBertForTokenClassification extends DistilBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * DistilBertForQuestionAnswering is a class representing a DistilBERT model for question answering.
 */
export class DistilBertForQuestionAnswering extends DistilBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}

/**
 * DistilBertForMaskedLM is a class representing a DistilBERT model for masking task.
 */
export class DistilBertForMaskedLM extends DistilBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} returned object
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// ESM models
export class EsmPreTrainedModel extends PreTrainedModel { }

/**
 * The bare ESM Model transformer outputting raw hidden-states without any specific head on top.
 */
export class EsmModel extends EsmPreTrainedModel { }

/**
 * ESM Model with a `language modeling` head on top.
 */
export class EsmForMaskedLM extends EsmPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
 */
export class EsmForSequenceClassification extends EsmPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * ESM Model with a token classification head on top (a linear layer on top of the hidden-states output)
 * e.g. for Named-Entity-Recognition (NER) tasks.
 */
export class EsmForTokenClassification extends EsmPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// MobileBert models
export class MobileBertPreTrainedModel extends PreTrainedModel { }
export class MobileBertModel extends MobileBertPreTrainedModel { }

/**
 * MobileBertForMaskedLM is a class representing a MobileBERT model for masking task.
 */
export class MobileBertForMaskedLM extends MobileBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} returned object
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
 */
export class MobileBertForSequenceClassification extends MobileBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} returned object
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * MobileBert Model with a span classification head on top for extractive question-answering tasks
 */
export class MobileBertForQuestionAnswering extends MobileBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} returned object
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// MPNet models
export class MPNetPreTrainedModel extends PreTrainedModel { }

/**
 * The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.
 */
export class MPNetModel extends MPNetPreTrainedModel { }

/**
 * MPNetForMaskedLM is a class representing a MPNet model for masked language modeling.
 */
export class MPNetForMaskedLM extends MPNetPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * MPNetForSequenceClassification is a class representing a MPNet model for sequence classification.
*/
export class MPNetForSequenceClassification extends MPNetPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * MPNetForTokenClassification is a class representing a MPNet model for token classification.
 */
export class MPNetForTokenClassification extends MPNetPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * MPNetForQuestionAnswering is a class representing a MPNet model for question answering.
 */
export class MPNetForQuestionAnswering extends MPNetPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// SqueezeBert models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class SqueezeBertPreTrainedModel extends PreTrainedModel { }

/**
 * The bare SqueezeBERT Model transformer outputting raw hidden-states without any specific head on top.
 */
export class SqueezeBertModel extends SqueezeBertPreTrainedModel { }

/**
 * SqueezeBERT model with a `language modeling` head on top.
 */
export class SqueezeBertForMaskedLM extends SqueezeBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} returned object
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * SqueezeBERT model with a sequence classification/regression head on top (a linear layer on top of the pooled output).
 */
export class SqueezeBertForSequenceClassification extends SqueezeBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} returned object
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * SqueezeBERT model with a span classification head on top for extractive question-answering tasks.
 */
export class SqueezeBertForQuestionAnswering extends SqueezeBertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} returned object
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// Albert models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class AlbertPreTrainedModel extends PreTrainedModel { }

/**
 * The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.
 */
export class AlbertModel extends AlbertPreTrainedModel { }

/**
 * ALBERT Model transformer with a sequence classification head on top (a linear layer on top of the pooled output).
 */
export class AlbertForSequenceClassification extends AlbertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} returned object
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * ALBERT Model with a span classification head on top for extractive question-answering tasks.
 */
export class AlbertForQuestionAnswering extends AlbertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} returned object
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}

/**
 * ALBERT Model with a `language modeling` head on top.
 */
export class AlbertForMaskedLM extends AlbertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} returned object
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// T5 models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class T5PreTrainedModel extends PreTrainedModel {
    // Encoder-decoder input layout expected by the exported ONNX sessions.
    forward_params = [
        'input_ids',
        'attention_mask',
        'encoder_outputs',
        'decoder_input_ids',
        'decoder_attention_mask',
        'past_key_values',
    ];
};

/**
 * The bare T5 Model transformer outputting raw hidden-states without any specific head on top.
 */
export class T5Model extends T5PreTrainedModel { }

/**
 * T5Model is a class representing a T5 model for conditional generation.
 */
export class T5ForConditionalGeneration extends T5PreTrainedModel { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// LONGT5 models
/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class LongT5PreTrainedModel extends PreTrainedModel { };

/**
 * The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.
 */
export class LongT5Model extends LongT5PreTrainedModel { }

/**
 * LONGT5 Model with a `language modeling` head on top.
 */
export class LongT5ForConditionalGeneration extends LongT5PreTrainedModel { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// MT5 models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class MT5PreTrainedModel extends PreTrainedModel { };

/**
 * The bare MT5 Model transformer outputting raw hidden-states without any specific head on top.
 */
export class MT5Model extends MT5PreTrainedModel { }

/**
 * A class representing a conditional sequence-to-sequence model based on the MT5 architecture.
 */
export class MT5ForConditionalGeneration extends MT5PreTrainedModel { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// Bart models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 * NOTE(review): class name uses `Pretrained` (lowercase t) unlike the other families' `PreTrained`;
 * kept as-is since it is part of the public API.
 */
export class BartPretrainedModel extends PreTrainedModel { };

/**
 * The bare BART Model outputting raw hidden-states without any specific head on top.
*/
export class BartModel extends BartPretrainedModel { }

/**
 * The BART Model with a language modeling head. Can be used for summarization.
 */
export class BartForConditionalGeneration extends BartPretrainedModel { }

/**
 * Bart model with a sequence classification/head on top (a linear layer on top of the pooled output)
 */
export class BartForSequenceClassification extends BartPretrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// MBart models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class MBartPreTrainedModel extends PreTrainedModel { };

/**
 * The bare MBART Model outputting raw hidden-states without any specific head on top.
 */
export class MBartModel extends MBartPreTrainedModel { }

/**
 * The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.
 */
export class MBartForConditionalGeneration extends MBartPreTrainedModel { }

/**
 * MBart model with a sequence classification/head on top (a linear layer on top of the pooled output).
 */
export class MBartForSequenceClassification extends MBartPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * MBART decoder-only variant with a causal language modeling head.
 */
export class MBartForCausalLM extends MBartPreTrainedModel { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// Blenderbot models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class BlenderbotPreTrainedModel extends PreTrainedModel { };

/**
 * The bare Blenderbot Model outputting raw hidden-states without any specific head on top.
 */
export class BlenderbotModel extends BlenderbotPreTrainedModel { }

/**
 * The Blenderbot Model with a language modeling head. Can be used for summarization.
 */
export class BlenderbotForConditionalGeneration extends BlenderbotPreTrainedModel { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// BlenderbotSmall models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class BlenderbotSmallPreTrainedModel extends PreTrainedModel { };

/**
 * The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.
 */
export class BlenderbotSmallModel extends BlenderbotSmallPreTrainedModel { }

/**
 * The BlenderbotSmall Model with a language modeling head. Can be used for summarization.
 */
export class BlenderbotSmallForConditionalGeneration extends BlenderbotSmallPreTrainedModel { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// Roberta models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class RobertaPreTrainedModel extends PreTrainedModel { }

/**
 * The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.
 */
export class RobertaModel extends RobertaPreTrainedModel { }

/**
 * RobertaForMaskedLM class for performing masked language modeling on Roberta models.
 */
export class RobertaForMaskedLM extends RobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} returned object
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * RobertaForSequenceClassification class for performing sequence classification on Roberta models.
 */
export class RobertaForSequenceClassification extends RobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} returned object
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * RobertaForTokenClassification class for performing token classification on Roberta models.
 */
export class RobertaForTokenClassification extends RobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * RobertaForQuestionAnswering class for performing question answering on Roberta models.
 */
export class RobertaForQuestionAnswering extends RobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} returned object
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// XLM models
/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class XLMPreTrainedModel extends PreTrainedModel { }

/**
 * The bare XLM Model transformer outputting raw hidden-states without any specific head on top.
*/
export class XLMModel extends XLMPreTrainedModel { }

/**
 * The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
 */
export class XLMWithLMHeadModel extends XLMPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} returned object
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
 */
export class XLMForSequenceClassification extends XLMPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} returned object
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * XLM Model with a token classification head on top (a linear layer on top of the hidden-states output)
 */
export class XLMForTokenClassification extends XLMPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * XLM Model with a span classification head on top for extractive question-answering tasks
 */
export class XLMForQuestionAnswering extends XLMPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} returned object
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// XLMRoberta models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class XLMRobertaPreTrainedModel extends PreTrainedModel { }

/**
 * The bare XLM-RoBERTa Model transformer outputting raw hidden-states without any specific head on top.
 */
export class XLMRobertaModel extends XLMRobertaPreTrainedModel { }

/**
 * XLMRobertaForMaskedLM class for performing masked language modeling on XLMRoberta models.
 */
export class XLMRobertaForMaskedLM extends XLMRobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} returned object
     */
    async _call(model_inputs) {
        return new MaskedLMOutput(await super._call(model_inputs));
    }
}

/**
 * XLMRobertaForSequenceClassification class for performing sequence classification on XLMRoberta models.
 */
export class XLMRobertaForSequenceClassification extends XLMRobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} returned object
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * XLMRobertaForTokenClassification class for performing token classification on XLMRoberta models.
 */
export class XLMRobertaForTokenClassification extends XLMRobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * XLMRobertaForQuestionAnswering class for performing question answering on XLMRoberta models.
 */
export class XLMRobertaForQuestionAnswering extends XLMRobertaPreTrainedModel {
    /**
     * Calls the model on new inputs.
     *
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} returned object
     */
    async _call(model_inputs) {
        return new QuestionAnsweringModelOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// Audio Spectrogram Transformer (AST) models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class ASTPreTrainedModel extends PreTrainedModel { };

/**
 * The bare AST Model transformer outputting raw hidden-states without any specific head on top.
 */
export class ASTModel extends ASTPreTrainedModel { }

/**
 * Audio Spectrogram Transformer model with an audio classification head on top
 * (a linear layer on top of the pooled output) e.g. for datasets like AudioSet, Speech Commands v2.
 */
export class ASTForAudioClassification extends ASTPreTrainedModel { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// Whisper models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class WhisperPreTrainedModel extends PreTrainedModel {

    // Whisper consumes log-mel spectrogram features, not token ids, so no attention mask is required.
    requires_attention_mask = false;
    main_input_name = 'input_features';
    forward_params = [
        'input_features',
        'attention_mask',
        'decoder_input_ids',
        'decoder_attention_mask',
        'past_key_values',
    ];
};

/**
 * WhisperModel class for training Whisper models without a language model head.
 */
export class WhisperModel extends WhisperPreTrainedModel { }

/**
 * WhisperForConditionalGeneration class for generating conditional outputs from Whisper models.
*/
export class WhisperForConditionalGeneration extends WhisperPreTrainedModel {

    /**
     * Prepares a Whisper-specific generation config from user kwargs.
     * @param {Object} generation_config User-supplied generation config (may be null).
     * @param {Object} kwargs Additional generation options.
     * @returns {WhisperGenerationConfig} The merged generation config.
     */
    _prepare_generation_config(generation_config, kwargs) {
        return /** @type {WhisperGenerationConfig} */ (super._prepare_generation_config(generation_config, kwargs, WhisperGenerationConfig));
    }

    /**
     * Builds the forced decoder prefix tokens from the generation config.
     * @param {WhisperGenerationConfig} generation_config
     */
    _retrieve_init_tokens(generation_config) {
        // prefix tokens are of the form:
        //  - Multilingual: <|startoftranscript|> <|lang_id|> <|task|> [<|notimestamps|>]
        //  - English-only: <|startoftranscript|> [<|notimestamps|>]

        // 1. Handle <|startoftranscript|> token
        const init_tokens = [generation_config.decoder_start_token_id];

        // 2. Handle <|lang_id|> and <|task|> tokens
        let language = generation_config.language;
        const task = generation_config.task;
        if (generation_config.is_multilingual) {
            if (!language) {
                // TODO: Implement language detection
                console.warn('No language specified - defaulting to English (en).');
                language = 'en';
            }

            // Add language token
            const language_code = whisper_language_to_code(language);
            const language_token = `<|${language_code}|>`;
            init_tokens.push(generation_config.lang_to_id[language_token])

            // Add task token
            // NOTE: Defaults to 'transcribe' if no task is specified
            init_tokens.push(generation_config.task_to_id[task ?? 'transcribe']);

        } else if (language || task) {
            throw new Error(
                "Cannot specify `task` or `language` for an English-only model. If the model is intended to be multilingual, pass `is_multilingual=true` to generate, or update the generation config."
            )
        }

        // 3. Handle <|notimestamps|> token
        if (
            !generation_config.return_timestamps
            && generation_config.no_timestamps_token_id
            && init_tokens.at(-1) !== generation_config.no_timestamps_token_id
        ) {
            init_tokens.push(generation_config.no_timestamps_token_id);
        } else if (
            generation_config.return_timestamps
            && init_tokens.at(-1) === generation_config.no_timestamps_token_id
        ) {
            console.warn("<|notimestamps|> prompt token is removed from generation_config since `return_timestamps` is set to `true`.");
            init_tokens.pop();
        }

        // let's make sure we don't pass `null` tokens as prompt tokens
        return init_tokens.filter(token => token != null);
    }

    /**
     * Transcribes or translates log-mel input features to a sequence of auto-regressively generated token ids.
     * @param {import('./models/whisper/generation_whisper.js').WhisperGenerationFunctionParameters} options
     * @returns {Promise<ModelOutput|Tensor>} The output of the model, which can contain the generated token ids, attentions, and scores.
     */
    async generate({
        inputs = null,
        generation_config = null,
        logits_processor = null,
        stopping_criteria = null,

        // Whisper-specific options (passed to kwargs)
        // prompt_ids = null,
        // language = null,
        // task = null,

        ...kwargs
    }) {
        generation_config = this._prepare_generation_config(generation_config, kwargs);

        // Caller-supplied decoder_input_ids take precedence over the derived prefix tokens.
        const init_tokens = kwargs.decoder_input_ids ?? this._retrieve_init_tokens(generation_config);

        if (generation_config.return_timestamps) {
            logits_processor ??= new LogitsProcessorList();
            logits_processor.push(
                new WhisperTimeStampLogitsProcessor(generation_config, init_tokens)
            );
        }

        if (generation_config.begin_suppress_tokens) {
            logits_processor ??= new LogitsProcessorList();
            logits_processor.push(
                new SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, init_tokens.length)
            );
        }

        if (generation_config.return_token_timestamps) {
            if (!generation_config.alignment_heads) {
                throw new Error(
                    "Model generation config has no `alignment_heads`, token-level timestamps not available. " +
                    "See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config."
                )
            }

            if (generation_config.task === 'translate') {
                console.warn("Token-level timestamps may not be reliable for task 'translate'.")
            }

            // Cross-attentions are needed by _extract_token_timestamps below.
            generation_config.output_attentions = true;
            generation_config.return_dict_in_generate = true;
        }

        const outputs = await super.generate({
            inputs,
            generation_config,
            logits_processor,
            decoder_input_ids: init_tokens,
            ...kwargs
        });

        if (generation_config.return_token_timestamps) {
            outputs["token_timestamps"] = this._extract_token_timestamps(
                // @ts-expect-error TS2345
                outputs,
                generation_config.alignment_heads,
                generation_config.num_frames,
            );
        }

        return outputs;
    }

    /**
     * Calculates token-level timestamps using the encoder-decoder cross-attentions and
     * dynamic time-warping (DTW) to map each output token to a position in the input audio.
     * If `num_frames` is specified, the encoder-decoder cross-attentions will be cropped before applying DTW.
     * @param {Object} generate_outputs Outputs generated by the model
     * @param {Tensor[][]} generate_outputs.cross_attentions The cross attentions output by the model
     * @param {Tensor} generate_outputs.sequences The sequences output by the model
     * @param {number[][]} alignment_heads Alignment heads of the model
     * @param {number} [num_frames=null] Number of frames in the input audio.
     * @param {number} [time_precision=0.02] Precision of the timestamps in seconds
     * @returns {Tensor} tensor containing the timestamps in seconds for each predicted token
     */
    _extract_token_timestamps(generate_outputs, alignment_heads, num_frames = null, time_precision = 0.02) {
        if (!generate_outputs.cross_attentions) {
            throw new Error(
                "Model outputs must contain cross attentions to extract timestamps. " +
                "This is most likely because the model was not exported with `output_attentions=True`."
            )
        }
        if (num_frames == null) {
            console.warn(
                "`num_frames` has not been set, meaning the entire audio will be analyzed. " +
                "This may lead to inaccurate token-level timestamps for short audios (< 30 seconds)."
            );
        }

        // @ts-expect-error TS2339
        let median_filter_width = this.config.median_filter_width;
        if (median_filter_width === undefined) {
            console.warn("Model config has no `median_filter_width`, using default value of 7.")
            median_filter_width = 7;
        }

        // TODO: Improve batch processing
        const batch = generate_outputs.cross_attentions;
        // Create a list with `decoder_layers` elements, each a tensor of shape
        // (batch size, attention_heads, output length, input length).
        // @ts-expect-error TS2339
        const cross_attentions = Array.from({ length: this.config.decoder_layers },
            // Concatenate the cross attentions for each layer across sequence length dimension.
            (_, i) => cat(batch.map(x => x[i]), 2)
        );

        // Select only the configured alignment heads from each layer.
        const weights = stack(alignment_heads.map(([l, h]) => {
            if (l >= cross_attentions.length) {
                throw new Error(`Layer index ${l} is out of bounds for cross attentions (length ${cross_attentions.length}).`)
            }
            return num_frames
                ? cross_attentions[l].slice(null, h, null, [0, num_frames])
                : cross_attentions[l].slice(null, h);
        })).transpose(1, 0, 2, 3);

        const [std, calculatedMean] = std_mean(weights, -2, 0, true);

        // Normalize and smoothen the weights.
        // NOTE(review): shape comments below reflect the typical single-batch case — TODO confirm for other models.
        const smoothedWeights = weights.clone(); // [1, 8, seqLength, 1500]

        for (let a = 0; a < smoothedWeights.dims[0]; ++a) {
            const aTensor = smoothedWeights[a]; // [8, seqLength, 1500]

            for (let b = 0; b < aTensor.dims[0]; ++b) {
                const bTensor = aTensor[b]; // [seqLength, 1500]

                const stdTensorData = std[a][b][0].data; // [1500]
                const meanTensorData = calculatedMean[a][b][0].data; // [1500]

                for (let c = 0; c < bTensor.dims[0]; ++c) {
                    let cTensorData = bTensor[c].data; // [1500]
                    for (let d = 0; d < cTensorData.length; ++d) {
                        cTensorData[d] = (cTensorData[d] - meanTensorData[d]) / stdTensorData[d]
                    }

                    // Apply median filter.
                    cTensorData.set(medianFilter(cTensorData, median_filter_width))
                }
            }
        }

        // Average the different cross-attention heads.
        // NOTE(review): only one averaged matrix is built here, so the DTW loop below effectively
        // supports batch size 1 (matches the upstream TODO on batch processing).
        const batchedMatrices = [mean(smoothedWeights, 1)];

        const timestampsShape = generate_outputs.sequences.dims;

        const timestamps = new Tensor(
            'float32',
            new Float32Array(timestampsShape[0] * timestampsShape[1]),
            timestampsShape
        );

        // Perform dynamic time warping on each element of the batch.
        for (let batch_idx = 0; batch_idx < timestampsShape[0]; ++batch_idx) {
            // NOTE: Since we run only one batch at a time, we can squeeze to get the same dimensions
            // as the python implementation
            const matrix = batchedMatrices[batch_idx].neg().squeeze_(0);
            const [text_indices, time_indices] = dynamic_time_warping(matrix.tolist());

            const diffs = Array.from({ length: text_indices.length - 1 }, (v, i) => text_indices[i + 1] - text_indices[i]);
            const jumps = mergeArrays([1], diffs).map(x => !!x); // convert to boolean

            const jump_times = [];
            for (let i = 0; i < jumps.length; ++i) {
                if (jumps[i]) {
                    // NOTE: No point in rounding here, since we set to Float32Array later
                    jump_times.push(time_indices[i] * time_precision);
                }
            }

            timestamps[batch_idx].data.set(jump_times, 1)
        }

        return timestamps;
    }
}
//////////////////////////////////////////////////

export class LiteWhisperForConditionalGeneration extends WhisperForConditionalGeneration { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// Moonshine models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class MoonshinePreTrainedModel extends PreTrainedModel {

    // Moonshine consumes raw audio samples, so no attention mask is required.
    requires_attention_mask = false;
    main_input_name = 'input_values';
    forward_params = [
        'input_values',
        'decoder_input_ids',
        'past_key_values',
    ];
};

/**
 * MoonshineModel class for training Moonshine models without a language model head.
*/
export class MoonshineModel extends MoonshinePreTrainedModel { }

export class MoonshineForConditionalGeneration extends MoonshinePreTrainedModel { }
//////////////////////////////////////////////////


//////////////////////////////////////////////////
/**
 * Vision Encoder-Decoder model based on OpenAI's GPT architecture for image captioning and other vision tasks
 */
export class VisionEncoderDecoderModel extends PreTrainedModel {
    main_input_name = 'pixel_values';
    forward_params = [
        // Encoder inputs
        'pixel_values',

        // Decoder inputs
        'decoder_input_ids',
        'encoder_hidden_states',
        'past_key_values',
    ];
}
//////////////////////////////////////////////////


//////////////////////////////////////////////////
// LLaVa Models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class LlavaPreTrainedModel extends PreTrainedModel {
    forward_params = [
        'input_ids',
        'attention_mask',
        'pixel_values',
        'position_ids',
        'past_key_values',
    ];
}

/**
 * The LLAVA model which consists of a vision backbone and a language model.
 */
export class LlavaForConditionalGeneration extends LlavaPreTrainedModel {

    /**
     * Splices image embeddings into the token embedding sequence at the image-token positions.
     * The image features are flattened to (num_patches, hidden_size) before merging.
     */
    _merge_input_ids_with_image_features(kwargs) {
        const vision_hidden_size = kwargs.image_features.dims.at(-1);
        const reshaped_image_hidden_states = kwargs.image_features.view(-1, vision_hidden_size);

        return default_merge_input_ids_with_image_features({
            // @ts-ignore
            image_token_id: this.config.image_token_index,
            ...kwargs,
            image_features: reshaped_image_hidden_states,
        })
    }
}
//////////////////////////////////////////////////

export class LlavaOnevisionForConditionalGeneration extends LlavaForConditionalGeneration { } // NOTE: extends LlavaForConditionalGeneration
export class Moondream1ForConditionalGeneration extends LlavaForConditionalGeneration { } // NOTE: extends LlavaForConditionalGeneration

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class Florence2PreTrainedModel extends PreTrainedModel {
    forward_params = [
        // Encoder inputs
        'input_ids',
        'inputs_embeds',
        'attention_mask',
        'pixel_values',

        // Decoder inputs
        'encoder_outputs',
        'decoder_input_ids',
        'decoder_inputs_embeds',
        'decoder_attention_mask',
        'past_key_values',
    ];

    main_input_name = 'inputs_embeds';
}

/**
 * Florence-2 vision-language model: image features are prepended to the task-prefix
 * text embeddings before running the encoder-decoder stack.
 */
export class Florence2ForConditionalGeneration extends Florence2PreTrainedModel {

    /**
     * Concatenates image embeddings before the task-prefix text embeddings
     * and extends the attention mask to cover the image positions.
     */
    _merge_input_ids_with_image_features({
        inputs_embeds,
        image_features,
        input_ids,
        attention_mask,
    }) {
        return {
            inputs_embeds: cat([
                image_features, // image embeds
                inputs_embeds, // task prefix embeds
            ], 1),
            attention_mask: cat([
                ones(image_features.dims.slice(0, 2)), // image attention mask
                attention_mask, // task prefix attention mask
            ], 1),
        }
    }

    /**
     * Computes the encoder input embeddings from text and/or image inputs,
     * merging them when both are present.
     */
    async _prepare_inputs_embeds({ input_ids, pixel_values, inputs_embeds, attention_mask }) {
        if (!input_ids && !pixel_values) {
            throw new Error('Either `input_ids` or `pixel_values` should be provided.');
        }

        // 1. Possibly, extract the input embeddings
        let text_features, image_features;
        if (input_ids) {
            text_features = await this.encode_text({ input_ids });
        }
        if (pixel_values) {
            image_features = await this.encode_image({ pixel_values });
        }

        // 2. Possibly, merge text and images
        if (text_features && image_features) {
            ({ inputs_embeds, attention_mask } = this._merge_input_ids_with_image_features({
                inputs_embeds: text_features,
                image_features,
                input_ids,
                attention_mask,
            }));
        } else {
            // Only one modality present; use it directly.
            inputs_embeds = text_features || image_features;
        }

        return { inputs_embeds, attention_mask };
    }

    /**
     * Full encoder-decoder forward pass. The encoder output is computed lazily
     * (only when `encoder_outputs` is not supplied, e.g. on the first generation step).
     */
    async forward({
        input_ids,
        pixel_values,
        attention_mask,
        decoder_input_ids,
        decoder_attention_mask,
        encoder_outputs,
        past_key_values,

        inputs_embeds,
        decoder_inputs_embeds,
    }) {
        if (!inputs_embeds) {
            ({ inputs_embeds, attention_mask } = await this._prepare_inputs_embeds({ input_ids, pixel_values, inputs_embeds, attention_mask }));
        }

        if (!encoder_outputs) {
            // Must compute encoder outputs
            let { last_hidden_state } = await encoderForward(this, { inputs_embeds, attention_mask });
            encoder_outputs = last_hidden_state;
        }

        if (!decoder_inputs_embeds) {
            if (!decoder_input_ids) {
                throw new Error('Either `decoder_input_ids` or `decoder_inputs_embeds` should be provided.');
            }
            decoder_inputs_embeds = await this.encode_text({ input_ids: decoder_input_ids });
        }

        const decoderFeeds = {
            inputs_embeds: decoder_inputs_embeds,
            attention_mask: decoder_attention_mask,
            encoder_attention_mask: attention_mask,
            encoder_hidden_states: encoder_outputs,
            past_key_values,
        };
        const decoder_outputs = await decoderForward(this, decoderFeeds, true);
        return decoder_outputs;
    }
}

export class PaliGemmaPreTrainedModel extends PreTrainedModel {
    forward_params = [
        'input_ids',
        // 'inputs_embeds',
        'attention_mask',
        'pixel_values',
        'position_ids',
        'past_key_values',
    ];
}

export class PaliGemmaForConditionalGeneration extends PaliGemmaPreTrainedModel {
    /**
     * Flatten the vision features to 2D and delegate to the shared merge helper,
     * keyed by the model's image placeholder token id.
     */
    _merge_input_ids_with_image_features(kwargs) {
        const vision_hidden_size = kwargs.image_features.dims.at(-1);
        const reshaped_image_hidden_states = kwargs.image_features.view(-1, vision_hidden_size);

        return default_merge_input_ids_with_image_features({
            // @ts-ignore
            image_token_id: this.config.image_token_index,
            ...kwargs,
            image_features: reshaped_image_hidden_states,
        })
    }
}

export class LlavaQwen2ForCausalLM extends LlavaPreTrainedModel {
    /**
     * Same merge strategy as LLaVA: flatten vision features and splice them in
     * at the image placeholder token positions.
     */
    _merge_input_ids_with_image_features(kwargs) {
        const vision_hidden_size = kwargs.image_features.dims.at(-1);
        const reshaped_image_hidden_states = kwargs.image_features.view(-1, vision_hidden_size);

        return default_merge_input_ids_with_image_features({
            // @ts-ignore
            image_token_id: this.config.image_token_index,
            ...kwargs,
            image_features: reshaped_image_hidden_states,
        })
    }
}

export class Gemma3nPreTrainedModel extends PreTrainedModel {
    forward_params = [
        'input_ids',
        'attention_mask',
        'inputs_embeds',
        'per_layer_inputs',
        'position_ids',
        'pixel_values',
        'input_features',
        'input_features_mask',
        'past_key_values',
    ];
}

export class Gemma3nForConditionalGeneration extends Gemma3nPreTrainedModel {
    /**
     * Multimodal forward pass: embeds the text tokens, optionally merges image
     * and/or audio features into the embedding sequence, then runs the decoder.
     */
    async forward({
        // Produced by the tokenizer/processor:
        input_ids = null,
        attention_mask = null,
        pixel_values = null,
        input_features = null,
        input_features_mask = null,

        // Used during generation:
        position_ids = null,
        inputs_embeds = null,
        per_layer_inputs=null,
        past_key_values = null,

        // Generic generation parameters
        generation_config = null,
        logits_processor = null,

        // TODO: needed?
        ...kwargs
    }) {
        if (!inputs_embeds || !per_layer_inputs) {
            // 1. Extract the text embeddings.
            ({ inputs_embeds, per_layer_inputs} = await sessionRun(this.sessions['embed_tokens'], {
                input_ids,
            }));

            // Merge modalities only when more than one token is being processed
            // (presumably the prefill pass; single-token decode steps skip merging).
            if (input_ids.dims[1] !== 1) {
                if (pixel_values) {
                    // Encode the image
                    const { image_features } = await sessionRun(this.sessions['vision_encoder'], {
                        pixel_values,
                    });
                    ({ inputs_embeds, attention_mask } = this._merge_input_ids_with_image_features({
                        image_features,
                        inputs_embeds,
                        input_ids,
                        attention_mask,
                    }));
                }

                if (input_features) {
                    // Encode the audio
                    const { audio_features } = await sessionRun(this.sessions['audio_encoder'], {
                        input_features,
                        input_features_mask,
                    });
                    ({ inputs_embeds, attention_mask } = this._merge_input_ids_with_audio_features({
                        audio_features,
                        inputs_embeds,
                        input_ids,
                        attention_mask,
                    }));
                }
            }
        }

        const outputs = await decoderForward(this, {
            inputs_embeds,
            per_layer_inputs,
            past_key_values,
            attention_mask,
            position_ids,
            generation_config,
            logits_processor,
        }, true);
        return outputs;
    }

    /**
     * Flatten vision features to 2D and delegate to the shared merge helper,
     * keyed by the model's image placeholder token id.
     */
    _merge_input_ids_with_image_features(kwargs) {
        const vision_hidden_size = kwargs.image_features.dims.at(-1);
        const reshaped_image_hidden_states = kwargs.image_features.view(-1, vision_hidden_size);

        return default_merge_input_ids_with_image_features({
            // @ts-ignore
            image_token_id: this.config.image_token_id,
            ...kwargs,
            image_features: reshaped_image_hidden_states,
        });
    }

    /**
     * Flatten audio features to 2D and delegate to the shared merge helper,
     * keyed by the model's audio placeholder token id.
     */
    _merge_input_ids_with_audio_features(kwargs) {
        const audio_hidden_size = kwargs.audio_features.dims.at(-1);
        const reshaped_audio_features = kwargs.audio_features.view(-1, audio_hidden_size);

        return default_merge_input_ids_with_audio_features({
            // @ts-ignore
            audio_token_id: this.config.audio_token_id,
            ...kwargs,
            audio_features: reshaped_audio_features,
        })
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Idefics3 Models
export class Idefics3PreTrainedModel extends PreTrainedModel {
    forward_params = [
        'input_ids',
        'attention_mask',
        'pixel_values',
        'pixel_attention_mask',
        'position_ids',
        'past_key_values',
    ];
}

/**
 * The Idefics3 model which consists of a vision backbone and a language model.
 */
export class Idefics3ForConditionalGeneration extends Idefics3PreTrainedModel {
    /**
     * Run the vision encoder session and return its `image_features` output.
     * Unlike the base CLIP-style encoders, this also forwards `pixel_attention_mask`.
     */
    async encode_image({ pixel_values, pixel_attention_mask }) {
        const features = (await sessionRun(this.sessions['vision_encoder'], { pixel_values, pixel_attention_mask })).image_features;
        return features;
    }

    /**
     * Flatten vision features to 2D and delegate to the shared merge helper,
     * keyed by the model's image placeholder token id.
     */
    _merge_input_ids_with_image_features(kwargs) {
        const vision_hidden_size = kwargs.image_features.dims.at(-1);
        const reshaped_image_hidden_states = kwargs.image_features.view(-1, vision_hidden_size);

        return default_merge_input_ids_with_image_features({
            // @ts-ignore
            image_token_id: this.config.image_token_id,
            ...kwargs,
            image_features: reshaped_image_hidden_states,
        })
    }
}
//////////////////////////////////////////////////

/**
 * The SmolVLM Model with a language modeling head.
 * It is made up of a SigLIP vision encoder, with a language modeling head on top.
 */
export class SmolVLMForConditionalGeneration extends Idefics3ForConditionalGeneration { }
//////////////////////////////////////////////////

export class Phi3VPreTrainedModel extends PreTrainedModel {
    forward_params = [
        'input_ids',
        'inputs_embeds',
        'attention_mask',
        'position_ids',
        'pixel_values',
        'image_sizes',
        'past_key_values',
    ];
}

export class Phi3VForCausalLM extends Phi3VPreTrainedModel {
    /**
     * Multimodal forward pass: encodes the image (when present), prepares the
     * merged input embeddings, then runs the decoder.
     */
    async forward({
        // Produced by the tokenizer/processor:
        input_ids = null,
        attention_mask = null,
        pixel_values = null,
        image_sizes = null,

        // Used during generation:
        position_ids = null,
        inputs_embeds = null,
        past_key_values = null,

        // Generic generation parameters
        generation_config = null,
        logits_processor = null, // TODO: needed?
        ...kwargs
    }) {
        if (!inputs_embeds) {
            let image_features;
            if (pixel_values && input_ids.dims[1] !== 1) {
                if (!image_sizes) {
                    throw new Error('`image_sizes` must be provided when `pixel_values` is provided.');
                }

                // Encode the image
                ({ image_features } = await sessionRun(this.sessions['vision_encoder'], {
                    pixel_values,
                    image_sizes,
                }));
            } else {
                // No image (or a single-token decode step): feed a zero-row
                // placeholder so `prepare_inputs_embeds` always receives an
                // `image_features` input of the expected width.
                const hidden_size = this.config.normalized_config.hidden_size;
                image_features = new Tensor(
                    'float32',
                    [],
                    [0, hidden_size],
                );
            }

            ({ inputs_embeds } = await sessionRun(this.sessions['prepare_inputs_embeds'], {
                input_ids,
                image_features,
            }));
        }

        const outputs = await decoderForward(this, {
            inputs_embeds,
            past_key_values,
            attention_mask,
            position_ids,
            generation_config,
            logits_processor,
        }, false);
        return outputs;
    }
}
//////////////////////////////////////////////////

export class CLIPPreTrainedModel extends PreTrainedModel { }

/**
 * CLIP Text and Vision Model with projection layers on top
 *
 * **Example:** Perform zero-shot image classification with a `CLIPModel`.
 *
 * ```javascript
 * import { AutoTokenizer, AutoProcessor, CLIPModel, RawImage } from '@huggingface/transformers';
 *
 * // Load tokenizer, processor, and model
 * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16');
 * let processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
 * let model = await CLIPModel.from_pretrained('Xenova/clip-vit-base-patch16');
 *
 * // Run tokenization
 * let texts = ['a photo of a car', 'a photo of a football match']
 * let text_inputs = tokenizer(texts, { padding: true, truncation: true });
 *
 * // Read image and run processor
 * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
 * let image_inputs = await processor(image);
 *
 * // Run model with both text and pixel inputs
 * let output = await model({ ...text_inputs, ...image_inputs });
 * // {
 * //   logits_per_image: Tensor {
 * //     dims: [ 1, 2 ],
 * //     data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ],
 * //   },
 * //   logits_per_text: Tensor {
 * //     dims: [ 2, 1 ],
 * //     data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ],
 * //   },
 * //   text_embeds: Tensor {
 * //     dims: [ 2, 512 ],
 * //     data: Float32Array(1024) [ ... ],
 * //   },
 * //   image_embeds: Tensor {
 * //     dims: [ 1, 512 ],
 * //     data: Float32Array(512) [ ... ],
 * //   }
 * // }
 * ```
 */
export class CLIPModel extends CLIPPreTrainedModel { }

/**
 * The text model from CLIP without any head or projection on top.
 * `from_pretrained` defaults `model_file_name` to 'text_model' so the
 * text sub-model file is loaded rather than the combined model.
 */
export class CLIPTextModel extends CLIPPreTrainedModel {
    /** @type {typeof PreTrainedModel.from_pretrained} */
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        return super.from_pretrained(pretrained_model_name_or_path, {
            ...options,
            // Update default model file name if not provided
            model_file_name: options.model_file_name ??
'text_model', }); } } /** * CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output) * * **Example:** Compute text embeddings with `CLIPTextModelWithProjection`. * * ```javascript * import { AutoTokenizer, CLIPTextModelWithProjection } from '@huggingface/transformers'; * * // Load tokenizer and text model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16'); * const text_model = await CLIPTextModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16'); * * // Run tokenization * let texts = ['a photo of a car', 'a photo of a football match']; * let text_inputs = tokenizer(texts, { padding: true, truncation: true }); * * // Compute embeddings * const { text_embeds } = await text_model(text_inputs); * // Tensor { * // dims: [ 2, 512 ], * // type: 'float32', * // data: Float32Array(1024) [ ... ], * // size: 1024 * // } * ``` */ export class CLIPTextModelWithProjection extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } /** * The vision model from CLIP without any head or projection on top. */ export class CLIPVisionModel extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'vision_model', }); } } /** * CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output) * * **Example:** Compute vision embeddings with `CLIPVisionModelWithProjection`. 
* * ```javascript * import { AutoProcessor, CLIPVisionModelWithProjection, RawImage} from '@huggingface/transformers'; * * // Load processor and vision model * const processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16'); * const vision_model = await CLIPVisionModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16'); * * // Read image and run processor * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg'); * let image_inputs = await processor(image); * * // Compute embeddings * const { image_embeds } = await vision_model(image_inputs); * // Tensor { * // dims: [ 1, 512 ], * // type: 'float32', * // data: Float32Array(512) [ ... ], * // size: 512 * // } * ``` */ export class CLIPVisionModelWithProjection extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'vision_model', }); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // SigLIP models export class SiglipPreTrainedModel extends PreTrainedModel { } /** * SigLIP Text and Vision Model with a projection layers on top * * **Example:** Perform zero-shot image classification with a `SiglipModel`. 
 *
 * ```javascript
 * import { AutoTokenizer, AutoProcessor, SiglipModel, RawImage } from '@huggingface/transformers';
 *
 * // Load tokenizer, processor, and model
 * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-224');
 * const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-224');
 * const model = await SiglipModel.from_pretrained('Xenova/siglip-base-patch16-224');
 *
 * // Run tokenization
 * const texts = ['a photo of 2 cats', 'a photo of 2 dogs'];
 * const text_inputs = tokenizer(texts, { padding: 'max_length', truncation: true });
 *
 * // Read image and run processor
 * const image = await RawImage.read('http://images.cocodataset.org/val2017/000000039769.jpg');
 * const image_inputs = await processor(image);
 *
 * // Run model with both text and pixel inputs
 * const output = await model({ ...text_inputs, ...image_inputs });
 * // {
 * //   logits_per_image: Tensor {
 * //     dims: [ 1, 2 ],
 * //     data: Float32Array(2) [ -1.6019744873046875, -10.720091819763184 ],
 * //   },
 * //   logits_per_text: Tensor {
 * //     dims: [ 2, 1 ],
 * //     data: Float32Array(2) [ -1.6019744873046875, -10.720091819763184 ],
 * //   },
 * //   text_embeds: Tensor {
 * //     dims: [ 2, 768 ],
 * //     data: Float32Array(1536) [ ... ],
 * //   },
 * //   image_embeds: Tensor {
 * //     dims: [ 1, 768 ],
 * //     data: Float32Array(768) [ ... ],
 * //   }
 * // }
 * ```
 */
export class SiglipModel extends SiglipPreTrainedModel { }

/**
 * The text model from SigLIP without any head or projection on top.
 *
 * **Example:** Compute text embeddings with `SiglipTextModel`.
* * ```javascript * import { AutoTokenizer, SiglipTextModel } from '@huggingface/transformers'; * * // Load tokenizer and text model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-224'); * const text_model = await SiglipTextModel.from_pretrained('Xenova/siglip-base-patch16-224'); * * // Run tokenization * const texts = ['a photo of 2 cats', 'a photo of 2 dogs']; * const text_inputs = tokenizer(texts, { padding: 'max_length', truncation: true }); * * // Compute embeddings * const { pooler_output } = await text_model(text_inputs); * // Tensor { * // dims: [ 2, 768 ], * // type: 'float32', * // data: Float32Array(1536) [ ... ], * // size: 1536 * // } * ``` */ export class SiglipTextModel extends SiglipPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } /** * The vision model from SigLIP without any head or projection on top. * * **Example:** Compute vision embeddings with `SiglipVisionModel`. * * ```javascript * import { AutoProcessor, SiglipVisionModel, RawImage} from '@huggingface/transformers'; * * // Load processor and vision model * const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-224'); * const vision_model = await SiglipVisionModel.from_pretrained('Xenova/siglip-base-patch16-224'); * * // Read image and run processor * const image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg'); * const image_inputs = await processor(image); * * // Compute embeddings * const { pooler_output } = await vision_model(image_inputs); * // Tensor { * // dims: [ 1, 768 ], * // type: 'float32', * // data: Float32Array(768) [ ... 
], * // size: 768 * // } * ``` */ export class SiglipVisionModel extends CLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'vision_model', }); } } ////////////////////////////////////////////////// // ChineseCLIP models export class ChineseCLIPPreTrainedModel extends PreTrainedModel { } export class ChineseCLIPModel extends ChineseCLIPPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // JinaCLIP models export class JinaCLIPPreTrainedModel extends PreTrainedModel { } export class JinaCLIPModel extends JinaCLIPPreTrainedModel { async forward(model_inputs) { const missing_text_inputs = !model_inputs.input_ids; const missing_image_inputs = !model_inputs.pixel_values; if (missing_text_inputs && missing_image_inputs) { throw new Error('Either `input_ids` or `pixel_values` should be provided.'); } // If either `input_ids` or `pixel_values` aren't passed, we need to create dummy input since the model requires a value to be specified. if (missing_text_inputs) { // NOTE: We cannot pass zero-dimension tensor as input for input_ids. // Fortunately, the majority of time is spent in the vision encoder, so this shouldn't significantly impact performance. model_inputs.input_ids = ones([model_inputs.pixel_values.dims[0], 1]); } if (missing_image_inputs) { // NOTE: Since we create a zero-sized tensor, this does not increase computation time. 
// @ts-ignore const { image_size } = this.config.vision_config; model_inputs.pixel_values = full([0, 3, image_size, image_size], 0.0); // (pass zero-dimension tensor) } const { text_embeddings, image_embeddings, l2norm_text_embeddings, l2norm_image_embeddings } = await super.forward(model_inputs); const result = {}; if (!missing_text_inputs) { result.text_embeddings = text_embeddings; result.l2norm_text_embeddings = l2norm_text_embeddings; } if (!missing_image_inputs) { result.image_embeddings = image_embeddings; result.l2norm_image_embeddings = l2norm_image_embeddings; } return result } } export class JinaCLIPTextModel extends JinaCLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } export class JinaCLIPVisionModel extends JinaCLIPPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'vision_model', }); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // CLIPSeg models export class CLIPSegPreTrainedModel extends PreTrainedModel { } export class CLIPSegModel extends CLIPSegPreTrainedModel { } /** * CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation. * * **Example:** Perform zero-shot image segmentation with a `CLIPSegForImageSegmentation` model. 
 *
 * ```javascript
 * import { AutoTokenizer, AutoProcessor, CLIPSegForImageSegmentation, RawImage } from '@huggingface/transformers';
 *
 * // Load tokenizer, processor, and model
 * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clipseg-rd64-refined');
 * const processor = await AutoProcessor.from_pretrained('Xenova/clipseg-rd64-refined');
 * const model = await CLIPSegForImageSegmentation.from_pretrained('Xenova/clipseg-rd64-refined');
 *
 * // Run tokenization
 * const texts = ['a glass', 'something to fill', 'wood', 'a jar'];
 * const text_inputs = tokenizer(texts, { padding: true, truncation: true });
 *
 * // Read image and run processor
 * const image = await RawImage.read('https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true');
 * const image_inputs = await processor(image);
 *
 * // Run model with both text and pixel inputs
 * const { logits } = await model({ ...text_inputs, ...image_inputs });
 * // logits: Tensor {
 * //   dims: [4, 352, 352],
 * //   type: 'float32',
 * //   data: Float32Array(495616) [ ... ],
 * //   size: 495616
 * // }
 * ```
 *
 * You can visualize the predictions as follows:
 * ```javascript
 * const preds = logits
 *   .unsqueeze_(1)
 *   .sigmoid_()
 *   .mul_(255)
 *   .round_()
 *   .to('uint8');
 *
 * for (let i = 0; i < preds.dims[0]; ++i) {
 *   const img = RawImage.fromTensor(preds[i]);
 *   img.save(`prediction_${i}.png`);
 * }
 * ```
 */
export class CLIPSegForImageSegmentation extends CLIPSegPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// GPT2 models
export class GPT2PreTrainedModel extends PreTrainedModel { }

export class GPT2Model extends GPT2PreTrainedModel { }

/**
 * GPT-2 language model head on top of the GPT-2 base model. This model is suitable for text generation tasks.
 */
export class GPT2LMHeadModel extends GPT2PreTrainedModel { }
// export class GPT2ForSequenceClassification extends GPT2PreTrainedModel {
// TODO
// }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// JAIS models
export class JAISPreTrainedModel extends PreTrainedModel { }

/**
 * The bare JAIS Model transformer outputting raw hidden-states without any specific head on top.
 */
export class JAISModel extends JAISPreTrainedModel { }

/**
 * The JAIS Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
 */
export class JAISLMHeadModel extends JAISPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// GPTNeo models
export class GPTNeoPreTrainedModel extends PreTrainedModel { }

/**
 * The bare GPTNeo Model outputting raw hidden-states without any specific head on top.
 */
export class GPTNeoModel extends GPTNeoPreTrainedModel { }

/**
 * The GPTNeo Model with a language modeling head on top, for text generation.
 */
export class GPTNeoForCausalLM extends GPTNeoPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// GPTNeoX models
export class GPTNeoXPreTrainedModel extends PreTrainedModel { }

/**
 * The bare GPTNeoX Model outputting raw hidden-states without any specific head on top.
 */
export class GPTNeoXModel extends GPTNeoXPreTrainedModel { }

/**
 * The GPTNeoX Model with a language modeling head on top, for text generation.
 */
export class GPTNeoXForCausalLM extends GPTNeoXPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// GPT-J models
export class GPTJPreTrainedModel extends PreTrainedModel { }

/**
 * The bare GPT-J Model outputting raw hidden-states without any specific head on top.
 */
export class GPTJModel extends GPTJPreTrainedModel { }

/**
 * The GPT-J Model with a language modeling head on top, for text generation.
 */
export class GPTJForCausalLM extends GPTJPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// GPTBigCode models
export class GPTBigCodePreTrainedModel extends PreTrainedModel { }

/**
 * The bare GPTBigCode Model outputting raw hidden-states without any specific head on top.
 */
export class GPTBigCodeModel extends GPTBigCodePreTrainedModel { }

/**
 * The GPTBigCode Model with a language modeling head on top, for text generation.
 */
export class GPTBigCodeForCausalLM extends GPTBigCodePreTrainedModel { }
//////////////////////////////////////////////////
////////////////////////////////////////////////// // CodeGen models export class CodeGenPreTrainedModel extends PreTrainedModel { } /** * CodeGenModel is a class representing a code generation model without a language model head. */ export class CodeGenModel extends CodeGenPreTrainedModel { } /** * CodeGenForCausalLM is a class that represents a code generation model based on the GPT-2 architecture. It extends the `CodeGenPreTrainedModel` class. */ export class CodeGenForCausalLM extends CodeGenPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // LLama models /** * The bare LLama Model outputting raw hidden-states without any specific head on top. */ export class LlamaPreTrainedModel extends PreTrainedModel { } /** * The bare LLaMA Model outputting raw hidden-states without any specific head on top. */ export class LlamaModel extends LlamaPreTrainedModel { } export class LlamaForCausalLM extends LlamaPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Arcee models export class ArceePreTrainedModel extends PreTrainedModel { } export class ArceeModel extends ArceePreTrainedModel { } export class ArceeForCausalLM extends ArceePreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // LFM2 models export class Lfm2PreTrainedModel extends PreTrainedModel { } export class Lfm2Model extends Lfm2PreTrainedModel { } export class Lfm2ForCausalLM extends Lfm2PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // SmolLM3 models export class SmolLM3PreTrainedModel extends PreTrainedModel { } export class SmolLM3Model extends SmolLM3PreTrainedModel { } export class SmolLM3ForCausalLM extends SmolLM3PreTrainedModel { } ////////////////////////////////////////////////// 
//////////////////////////////////////////////////
// Helium models
export class HeliumPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Helium Model outputting raw hidden-states without any specific head on top.
 */
export class HeliumModel extends HeliumPreTrainedModel { }

/**
 * The Helium Model transformer with a language modeling head on top.
 */
export class HeliumForCausalLM extends HeliumPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Glm models
export class GlmPreTrainedModel extends PreTrainedModel { }

/**
 * The bare GLM Model outputting raw hidden-states without any specific head on top.
 */
export class GlmModel extends GlmPreTrainedModel { }

/**
 * The GLM Model transformer with a language modeling head on top.
 */
export class GlmForCausalLM extends GlmPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// EXAONE models
export class ExaonePreTrainedModel extends PreTrainedModel { }

/**
 * The bare EXAONE Model outputting raw hidden-states without any specific head on top.
 */
export class ExaoneModel extends ExaonePreTrainedModel { }

/**
 * The EXAONE Model transformer with a language modeling head on top.
 */
export class ExaoneForCausalLM extends ExaonePreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// MobileLLM models
export class MobileLLMPreTrainedModel extends PreTrainedModel { }

/**
 * The bare MobileLLM Model outputting raw hidden-states without any specific head on top.
 */
export class MobileLLMModel extends MobileLLMPreTrainedModel { }

/**
 * The MobileLLM Model transformer with a language modeling head on top.
 */
export class MobileLLMForCausalLM extends MobileLLMPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// OLMo models
export class OlmoPreTrainedModel extends PreTrainedModel { }

/**
 * The bare OLMo Model outputting raw hidden-states without any specific head on top.
 */
export class OlmoModel extends OlmoPreTrainedModel { }

/**
 * The OLMo Model transformer with a language modeling head on top.
 */
export class OlmoForCausalLM extends OlmoPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// OLMo2 models
export class Olmo2PreTrainedModel extends PreTrainedModel { }

/**
 * The bare OLMo2 Model outputting raw hidden-states without any specific head on top.
 */
export class Olmo2Model extends Olmo2PreTrainedModel { }

/**
 * The OLMo2 Model transformer with a language modeling head on top.
 */
export class Olmo2ForCausalLM extends Olmo2PreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Granite models
export class GranitePreTrainedModel extends PreTrainedModel { }

/**
 * The bare Granite Model outputting raw hidden-states without any specific head on top.
 */
export class GraniteModel extends GranitePreTrainedModel { }

/**
 * The Granite Model transformer with a language modeling head on top.
 */
export class GraniteForCausalLM extends GranitePreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Cohere models

/**
 * The bare Cohere Model outputting raw hidden-states without any specific head on top.
 */
export class CoherePreTrainedModel extends PreTrainedModel { }

export class CohereModel extends CoherePreTrainedModel { }

export class CohereForCausalLM extends CoherePreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Gemma models

/**
 * The bare Gemma Model outputting raw hidden-states without any specific head on top.
 */
export class GemmaPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Gemma Model outputting raw hidden-states without any specific head on top.
 */
export class GemmaModel extends GemmaPreTrainedModel { }

export class GemmaForCausalLM extends GemmaPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Gemma2 models

/**
 * The bare Gemma2 Model outputting raw hidden-states without any specific head on top.
 */
export class Gemma2PreTrainedModel extends PreTrainedModel { }

/**
 * The bare Gemma2 Model outputting raw hidden-states without any specific head on top.
 */
export class Gemma2Model extends Gemma2PreTrainedModel { }

export class Gemma2ForCausalLM extends Gemma2PreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Gemma3 models

/**
 * The bare Gemma3 Model outputting raw hidden-states without any specific head on top.
 */
export class Gemma3PreTrainedModel extends PreTrainedModel { }

/**
 * The bare Gemma3 Model outputting raw hidden-states without any specific head on top.
*/ export class Gemma3Model extends Gemma3PreTrainedModel { } export class Gemma3ForCausalLM extends Gemma3PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class OpenELMPreTrainedModel extends PreTrainedModel { } export class OpenELMModel extends OpenELMPreTrainedModel { } export class OpenELMForCausalLM extends OpenELMPreTrainedModel { } ////////////////////////////////////////////////// // Qwen2 models /** * The bare Qwen2 Model outputting raw hidden-states without any specific head on top. */ export class Qwen2PreTrainedModel extends PreTrainedModel { } /** * The bare Qwen2 Model outputting raw hidden-states without any specific head on top. */ export class Qwen2Model extends Qwen2PreTrainedModel { } export class Qwen2ForCausalLM extends Qwen2PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Qwen3 models /** * The bare Qwen3 Model outputting raw hidden-states without any specific head on top. */ export class Qwen3PreTrainedModel extends PreTrainedModel { } /** * The bare Qwen3 Model outputting raw hidden-states without any specific head on top. */ export class Qwen3Model extends Qwen3PreTrainedModel { } export class Qwen3ForCausalLM extends Qwen3PreTrainedModel { } ////////////////////////////////////////////////// export class Qwen2VLPreTrainedModel extends PreTrainedModel { forward_params = [ // Text inputs 'input_ids', 'attention_mask', 'position_ids', 'past_key_values', // Vision inputs 'pixel_values', 'image_grid_thw', ]; } export class Qwen2VLForConditionalGeneration extends Qwen2VLPreTrainedModel { /** * Calculate the 3D rope index based on image and video's temporal, height and width in LLM. * * Explanation: * Each embedding sequence contains vision embedding and text embedding or just contains text embedding. 
* * For pure text embedding sequence, the rotary position embedding has no difference with mordern LLMs. * Examples: * input_ids: [T T T T T], here T is for text. * temporal position_ids: [0, 1, 2, 3, 4] * height position_ids: [0, 1, 2, 3, 4] * width position_ids: [0, 1, 2, 3, 4] * * For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part * and 1D rotary position embeddin for text part. * Examples: * Assume we have a video input with 3 temporal patches, 2 height patches and 2 width patches. * input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision. * vision temporal position_ids: [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] * vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1] * vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] * text temporal position_ids: [3, 4, 5, 6, 7] * text height position_ids: [3, 4, 5, 6, 7] * text width position_ids: [3, 4, 5, 6, 7] * Here we calculate the text start position_ids as the max vision position_ids plus 1. * * @param {Tensor} input_ids Indices of input sequence tokens in the vocabulary. Tensor of shape `(batch_size, sequence_length)`. * @param {Tensor} image_grid_thw (Optional) The temporal, height and width of feature shape of each image in LLM. Tensor of shape `(num_images, 3)`. * @param {Tensor} video_grid_thw (Optional) The temporal, height and width of feature shape of each video in LLM. Tensor of shape `(num_videos, 3)`. * @param {Tensor} attention_mask (Optional) Mask to avoid performing attention on padding token indices. Tensor of shape `(batch_size, sequence_length)`. Mask values selected in `[0, 1]`: * - 1 for tokens that are **not masked**, * - 0 for tokens that are **masked**. * @returns {[Tensor, Tensor]} [position_ids, mrope_position_deltas] with: * - position_ids: Tensor of shape `(3, batch_size, sequence_length)`. * - mrope_position_deltas: Tensor of shape `(batch_size)`. 
*/ get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask) { // @ts-ignore const { vision_config, image_token_id, video_token_id, vision_start_token_id } = this.config; const spatial_merge_size = vision_config.spatial_merge_size ?? 2; const mrope_position_deltas = []; if (image_grid_thw || video_grid_thw) { let total_input_ids = input_ids.tolist(); if (!attention_mask) { attention_mask = ones_like(input_ids); } const attention_mask_list = attention_mask.tolist(); const position_ids_list = Array.from({ length: 3 }, _ => Array.from({ length: input_ids.dims[0] }, _ => Array.from({ length: input_ids.dims[1] }, _ => 1))); const image_grid_thw_list = image_grid_thw ? image_grid_thw.tolist() : []; const video_grid_thw_list = video_grid_thw ? video_grid_thw.tolist() : []; let image_index = 0; let video_index = 0; for (let i = 0; i < total_input_ids.length; ++i) { const ids = total_input_ids[i].filter((_, j) => attention_mask_list[i][j] == 1); const vision_start_indices = ids.reduce((acc, x, idx) => { if (x == vision_start_token_id) acc.push(idx); return acc; }, []); const vision_tokens = vision_start_indices.map(x => ids[x + 1]); const image_nums = vision_tokens.filter(x => x == image_token_id).length; const video_nums = vision_tokens.filter(x => x == video_token_id).length; /** @type {number[][]} */ let llm_pos_ids_list = []; let st = 0; let remain_images = image_nums; let remain_videos = video_nums; for (let j = 0; j < vision_tokens.length; ++j) { const next_image_token = ids.findIndex((x, i) => i > st && x == image_token_id); const next_video_token = ids.findIndex((x, i) => i > st && x == video_token_id); const ed_image = (remain_images > 0 && next_image_token !== -1) ? next_image_token : ids.length + 1; const ed_video = (remain_videos > 0 && next_video_token !== -1) ? 
next_video_token : ids.length + 1; let ed; let t, h, w; if (ed_image < ed_video) { ([t, h, w] = image_grid_thw_list[image_index]); ++image_index; --remain_images; ed = ed_image; } else { ([t, h, w] = video_grid_thw_list[video_index]); ++video_index; --remain_videos; ed = ed_video; } const [llm_grid_t, llm_grid_h, llm_grid_w] = [ Number(t), Math.floor(Number(h) / spatial_merge_size), Math.floor(Number(w) / spatial_merge_size) ] const text_len = ed - st; const st_idx = llm_pos_ids_list.length > 0 ? max(llm_pos_ids_list.at(-1))[0] + 1 : 0; llm_pos_ids_list.push( Array.from({ length: 3 * text_len }, (_, i) => st_idx + (i % text_len)) ) const offset = text_len + st_idx; const grid_size = llm_grid_t * llm_grid_h * llm_grid_w; const t_index = Array.from({ length: grid_size }, (_, i) => offset + Math.floor(i / (llm_grid_h * llm_grid_w))) const h_index = Array.from({ length: grid_size }, (_, i) => offset + Math.floor(i / llm_grid_w) % llm_grid_h) const w_index = Array.from({ length: grid_size }, (_, i) => offset + i % llm_grid_w) llm_pos_ids_list.push([t_index, h_index, w_index].flat()) st = ed + grid_size; } if (st < ids.length) { const st_idx = llm_pos_ids_list.length > 0 ? 
max(llm_pos_ids_list.at(-1))[0] + 1 : 0; const text_len = ids.length - st; llm_pos_ids_list.push( Array.from({ length: 3 * text_len }, (_, i) => (st_idx + (i % text_len))) ) } // NOTE: Each item in llm_pos_ids_list is an array of shape (3, text_len), // meaning to perform concatenation along dim=1, we can do the following: const num_items = llm_pos_ids_list.reduce((acc, x) => acc + x.length, 0); /** @type {number[]} */ const llm_positions = new Array(num_items); let index = 0; for (let x = 0; x < 3; ++x) { for (let y = 0; y < llm_pos_ids_list.length; ++y) { const val = llm_pos_ids_list[y]; const text_len = val.length / 3; for (let z = x * text_len; z < (x + 1) * text_len; ++z) { llm_positions[index++] = val[z]; } } } let count = 0; const attn_mask = attention_mask_list[i]; for (let y = 0; y < attn_mask.length; ++y) { if (attn_mask[y] == 1) { for (let x = 0; x < 3; ++x) { position_ids_list[x][i][y] = llm_positions[x * num_items / 3 + count]; } ++count; } } const max_llm_positions = max(llm_positions)[0]; mrope_position_deltas.push(max_llm_positions + 1 - total_input_ids[i].length); } return [ new Tensor('int64', position_ids_list.flat(Infinity), [3, input_ids.dims[0], input_ids.dims[1]]), new Tensor('int64', mrope_position_deltas, [mrope_position_deltas.length, 1]), ]; } else { // Text-only if (attention_mask) { const { data, dims } = cumsum_masked_fill(attention_mask); const position_ids = BigInt64Array.from( { length: 3 * data.length }, (_, i) => data[i % data.length] ); /** @type {bigint[]} */ const mrope_position_deltas = Array.from( { length: dims[0] }, (_, i) => max(data.subarray(dims[1] * i, dims[1] * (i + 1)))[0] + 1n + BigInt(dims[1]) ); return [ new Tensor('int64', position_ids, [3, ...dims]), new Tensor('int64', mrope_position_deltas, [mrope_position_deltas.length, 1]), ] } else { const [batch_size, seq_length] = input_ids.dims; const position_ids = BigInt64Array.from( { length: 3 * batch_size * seq_length }, (_, i) => BigInt(Math.floor(i % seq_length / 
batch_size)), ); return [ new Tensor('int64', position_ids, [3, ...input_ids.dims]), zeros([batch_size, 1]), ] } } } async encode_image({ pixel_values, image_grid_thw }) { const features = (await sessionRun(this.sessions['vision_encoder'], { pixel_values, grid_thw: image_grid_thw })).image_features; return features; } _merge_input_ids_with_image_features(kwargs) { return default_merge_input_ids_with_image_features({ // @ts-ignore image_token_id: this.config.image_token_id, ...kwargs }) } prepare_inputs_for_generation(input_ids, model_inputs, generation_config) { // Overwritten -- in specific circumstances we don't want to forward image inputs to the model if (model_inputs.attention_mask && !model_inputs.position_ids) { // Calculate position_ids and rope_deltas if (!model_inputs.past_key_values) { ([model_inputs.position_ids, model_inputs.rope_deltas] = this.get_rope_index( model_inputs.input_ids, model_inputs.image_grid_thw, model_inputs.video_grid_thw, model_inputs.attention_mask, )); } else { model_inputs.pixel_values = null; // model_inputs.pixel_values_videos = null; const delta = BigInt(Object.values(model_inputs.past_key_values)[0].dims.at(-2)); const rope_deltas_list = model_inputs.rope_deltas.map(x => delta + x); model_inputs.position_ids = stack([rope_deltas_list, rope_deltas_list, rope_deltas_list], 0) } } return model_inputs; } } ////////////////////////////////////////////////// // Phi models export class PhiPreTrainedModel extends PreTrainedModel { } /** * The bare Phi Model outputting raw hidden-states without any specific head on top. */ export class PhiModel extends PhiPreTrainedModel { } export class PhiForCausalLM extends PhiPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Phi3 models export class Phi3PreTrainedModel extends PreTrainedModel { } /** * The bare Phi3 Model outputting raw hidden-states without any specific head on top. 
 */
export class Phi3Model extends Phi3PreTrainedModel { }

/**
 * The Phi3 Model transformer with a language modeling head on top.
 */
export class Phi3ForCausalLM extends Phi3PreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Bloom models

/**
 * The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
 */
export class BloomPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.
 */
export class BloomModel extends BloomPreTrainedModel { }

/**
 * The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
 */
export class BloomForCausalLM extends BloomPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// MPT models
export class MptPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Mpt Model transformer outputting raw hidden-states without any specific head on top.
 */
export class MptModel extends MptPreTrainedModel { }

/**
 * The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
 */
export class MptForCausalLM extends MptPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// OPT models
export class OPTPreTrainedModel extends PreTrainedModel { }

/**
 * The bare OPT Model outputting raw hidden-states without any specific head on top.
 */
export class OPTModel extends OPTPreTrainedModel { }

/**
 * The OPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
 */
export class OPTForCausalLM extends OPTPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// ViT models
export class ViTPreTrainedModel extends PreTrainedModel { }

/**
 * The bare ViT Model outputting raw hidden-states without any specific head on top.
 */
export class ViTModel extends ViTPreTrainedModel { }

/**
 * ViT Model with an image classification head on top.
 */
export class ViTForImageClassification extends ViTPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// I-JEPA models
export class IJepaPreTrainedModel extends PreTrainedModel { }

/**
 * The bare I-JEPA Model outputting raw hidden-states without any specific head on top.
 */
export class IJepaModel extends IJepaPreTrainedModel { }

/**
 * I-JEPA Model with an image classification head on top.
 */
export class IJepaForImageClassification extends IJepaPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// VitPose models
export class VitPosePreTrainedModel extends PreTrainedModel { }

/**
 * The VitPose model with a pose estimation head on top.
 */
export class VitPoseForPoseEstimation extends VitPosePreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// PVT models
export class PvtPreTrainedModel extends PreTrainedModel { }

export class PvtModel extends PvtPreTrainedModel { }

/**
 * PVT Model with an image classification head on top.
 */
export class PvtForImageClassification extends PvtPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// ViTMAE models
export class ViTMAEPreTrainedModel extends PreTrainedModel { }

export class ViTMAEModel extends ViTMAEPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// ViTMSN models
export class ViTMSNPreTrainedModel extends PreTrainedModel { }

export class ViTMSNModel extends ViTMSNPreTrainedModel { }

/**
 * ViTMSN Model with an image classification head on top.
 */
export class ViTMSNForImageClassification extends ViTMSNPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// GroupViT models
export class GroupViTPreTrainedModel extends PreTrainedModel { }

export class GroupViTModel extends GroupViTPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// FastViT models
export class FastViTPreTrainedModel extends PreTrainedModel { }

export class FastViTModel extends FastViTPreTrainedModel { }

/**
 * FastViT Model with an image classification head on top.
 */
export class FastViTForImageClassification extends FastViTPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// VitMatte models
export class VitMattePreTrainedModel extends PreTrainedModel { }

/**
 *
ViTMatte framework leveraging any vision backbone e.g. for ADE20k, CityScapes.
 *
 * **Example:** Perform image matting with a `VitMatteForImageMatting` model.
 * ```javascript
 * import { AutoProcessor, VitMatteForImageMatting, RawImage } from '@huggingface/transformers';
 *
 * // Load processor and model
 * const processor = await AutoProcessor.from_pretrained('Xenova/vitmatte-small-distinctions-646');
 * const model = await VitMatteForImageMatting.from_pretrained('Xenova/vitmatte-small-distinctions-646');
 *
 * // Load image and trimap
 * const image = await RawImage.fromURL('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/vitmatte_image.png');
 * const trimap = await RawImage.fromURL('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/vitmatte_trimap.png');
 *
 * // Prepare image + trimap for the model
 * const inputs = await processor(image, trimap);
 *
 * // Predict alpha matte
 * const { alphas } = await model(inputs);
 * // Tensor {
 * //   dims: [ 1, 1, 640, 960 ],
 * //   type: 'float32',
 * //   size: 614400,
 * //   data: Float32Array(614400) [ 0.9894027709960938, 0.9970508813858032, ... ]
 * // }
 * ```
 *
 * You can visualize the alpha matte as follows:
 * ```javascript
 * import { Tensor, cat } from '@huggingface/transformers';
 *
 * // Visualize predicted alpha matte
 * const imageTensor = image.toTensor();
 *
 * // Convert float (0-1) alpha matte to uint8 (0-255)
 * const alphaChannel = alphas
 *     .squeeze(0)
 *     .mul_(255)
 *     .clamp_(0, 255)
 *     .round_()
 *     .to('uint8');
 *
 * // Concatenate original image with predicted alpha
 * const imageData = cat([imageTensor, alphaChannel], 0);
 *
 * // Save output image
 * const outputImage = RawImage.fromTensor(imageData);
 * outputImage.save('output.png');
 * ```
 */
export class VitMatteForImageMatting extends VitMattePreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in an `ImageMattingOutput`.
     * @param {any} model_inputs
     * @returns {Promise<ImageMattingOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new ImageMattingOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// MobileViT models
export class MobileViTPreTrainedModel extends PreTrainedModel { }

export class MobileViTModel extends MobileViTPreTrainedModel { }

/**
 * MobileViT Model with an image classification head on top.
 */
export class MobileViTForImageClassification extends MobileViTPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
// TODO: MobileViTForSemanticSegmentation
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// MobileViTV2 models
export class MobileViTV2PreTrainedModel extends PreTrainedModel { }

export class MobileViTV2Model extends MobileViTV2PreTrainedModel { }

/**
 * MobileViTV2 Model with an image classification head on top.
 */
export class MobileViTV2ForImageClassification extends MobileViTV2PreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
// TODO: MobileViTV2ForSemanticSegmentation
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// OWL-ViT models
export class OwlViTPreTrainedModel extends PreTrainedModel { }

export class OwlViTModel extends OwlViTPreTrainedModel { }

export class OwlViTForObjectDetection extends OwlViTPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// OWLv2 models
export class Owlv2PreTrainedModel extends PreTrainedModel { }

export class Owlv2Model extends Owlv2PreTrainedModel { }

export class Owlv2ForObjectDetection extends Owlv2PreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Beit Models
export class BeitPreTrainedModel extends PreTrainedModel { }

export class BeitModel extends BeitPreTrainedModel { }

/**
 * BEiT Model with an image classification head on top.
 */
export class BeitForImageClassification extends BeitPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// DETR models
export class DetrPreTrainedModel extends PreTrainedModel { }

export class DetrModel extends DetrPreTrainedModel { }

/**
 * DETR Model with an object detection head on top.
 */
export class DetrForObjectDetection extends DetrPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `DetrObjectDetectionOutput`.
     * @param {any} model_inputs
     * @returns {Promise<DetrObjectDetectionOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new DetrObjectDetectionOutput(await super._call(model_inputs));
    }
}

export class DetrForSegmentation extends DetrPreTrainedModel {
    /**
     * Runs the model with the provided inputs
     * @param {Object} model_inputs Model inputs
     * @returns {Promise<DetrSegmentationOutput>} Object containing segmentation outputs
     */
    async _call(model_inputs) {
        return new DetrSegmentationOutput(await super._call(model_inputs));
    }
}

export class DetrObjectDetectionOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Classification logits (including no-object) for all queries.
     * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height).
 * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding).
     */
    constructor({ logits, pred_boxes }) {
        super();
        this.logits = logits;
        this.pred_boxes = pred_boxes;
    }
}

export class DetrSegmentationOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits The output logits of the model.
     * @param {Tensor} output.pred_boxes Predicted boxes.
     * @param {Tensor} output.pred_masks Predicted masks.
     */
    constructor({ logits, pred_boxes, pred_masks }) {
        super();
        this.logits = logits;
        this.pred_boxes = pred_boxes;
        this.pred_masks = pred_masks;
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// RT-DETR models
export class RTDetrPreTrainedModel extends PreTrainedModel { }

export class RTDetrModel extends RTDetrPreTrainedModel { }

/**
 * RT-DETR Model with an object detection head on top.
 */
export class RTDetrForObjectDetection extends RTDetrPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in an `RTDetrObjectDetectionOutput`.
     * @param {any} model_inputs
     * @returns {Promise<RTDetrObjectDetectionOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new RTDetrObjectDetectionOutput(await super._call(model_inputs));
    }
}

export class RTDetrObjectDetectionOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Classification logits (including no-object) for all queries.
     * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height).
     * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding).
     */
    constructor({ logits, pred_boxes }) {
        super();
        this.logits = logits;
        this.pred_boxes = pred_boxes;
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// RT-DETRv2 models
export class RTDetrV2PreTrainedModel extends PreTrainedModel { }

export class RTDetrV2Model extends RTDetrV2PreTrainedModel { }

/**
 * RT-DETRv2 Model with an object detection head on top.
 */
export class RTDetrV2ForObjectDetection extends RTDetrV2PreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in an `RTDetrV2ObjectDetectionOutput`.
     * @param {any} model_inputs
     * @returns {Promise<RTDetrV2ObjectDetectionOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new RTDetrV2ObjectDetectionOutput(await super._call(model_inputs));
    }
}

export class RTDetrV2ObjectDetectionOutput extends RTDetrObjectDetectionOutput { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// RF-DETR models
export class RFDetrPreTrainedModel extends PreTrainedModel { }

export class RFDetrModel extends RFDetrPreTrainedModel { }

/**
 * RF-DETR Model with an object detection head on top.
 */
export class RFDetrForObjectDetection extends RFDetrPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in an `RFDetrObjectDetectionOutput`.
     * @param {any} model_inputs
     * @returns {Promise<RFDetrObjectDetectionOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new RFDetrObjectDetectionOutput(await super._call(model_inputs));
    }
}

export class RFDetrObjectDetectionOutput extends RTDetrObjectDetectionOutput { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// D-FINE models
export class DFinePreTrainedModel extends PreTrainedModel { }

export class DFineModel extends DFinePreTrainedModel { }

/**
 * D-FINE Model with an object detection head on top.
 */
export class DFineForObjectDetection extends DFinePreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in an `RTDetrObjectDetectionOutput`.
     * @param {any} model_inputs
     * @returns {Promise<RTDetrObjectDetectionOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new RTDetrObjectDetectionOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Table Transformer models
export class TableTransformerPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Table Transformer Model (consisting of a backbone and encoder-decoder Transformer)
 * outputting raw hidden-states without any specific head on top.
 */
export class TableTransformerModel extends TableTransformerPreTrainedModel { }

/**
 * Table Transformer Model (consisting of a backbone and encoder-decoder Transformer)
 * with object detection heads on top, for tasks such as COCO detection.
 */
export class TableTransformerForObjectDetection extends TableTransformerPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `TableTransformerObjectDetectionOutput`.
     * @param {any} model_inputs
     * @returns {Promise<TableTransformerObjectDetectionOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new TableTransformerObjectDetectionOutput(await super._call(model_inputs));
    }
}

export class TableTransformerObjectDetectionOutput extends DetrObjectDetectionOutput { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// DeiT models
export class DeiTPreTrainedModel extends PreTrainedModel { }

export class DeiTModel extends DeiTPreTrainedModel { }

/**
 * DeiT Model with an image classification head on top.
 */
export class DeiTForImageClassification extends DeiTPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Hiera models
export class HieraPreTrainedModel extends PreTrainedModel { }

export class HieraModel extends HieraPreTrainedModel { }

/**
 * Hiera Model with an image classification head on top.
 */
export class HieraForImageClassification extends HieraPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// ResNet models

/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class ResNetPreTrainedModel extends PreTrainedModel { }

/**
 * The bare ResNet model outputting raw features without any specific head on top.
 */
export class ResNetModel extends ResNetPreTrainedModel { }

/**
 * ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
 */
export class ResNetForImageClassification extends ResNetPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Swin models
export class SwinPreTrainedModel extends PreTrainedModel { }

export class SwinModel extends SwinPreTrainedModel { }

/**
 * Swin Model with an image classification head on top.
 */
export class SwinForImageClassification extends SwinPreTrainedModel {
    /**
     * Forward pass; wraps the raw model output in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The wrapped model output.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

export class SwinForSemanticSegmentation extends SwinPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Swin2SR models
export class Swin2SRPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Swin2SR Model transformer outputting raw hidden-states without any specific head on top.
 */
export class Swin2SRModel extends Swin2SRPreTrainedModel { }

/**
 * Swin2SR Model transformer with an upsampler head on top for image super resolution and restoration.
 *
 * **Example:** Super-resolution w/ `Xenova/swin2SR-classical-sr-x2-64`.
 *
 * ```javascript
 * import { AutoProcessor, Swin2SRForImageSuperResolution, RawImage } from '@huggingface/transformers';
 *
 * // Load processor and model
 * const model_id = 'Xenova/swin2SR-classical-sr-x2-64';
 * const processor = await AutoProcessor.from_pretrained(model_id);
 * const model = await Swin2SRForImageSuperResolution.from_pretrained(model_id);
 *
 * // Prepare model inputs
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/butterfly.jpg';
 * const image = await RawImage.fromURL(url);
 * const inputs = await processor(image);
 *
 * // Run model
 * const outputs = await model(inputs);
 *
 * // Convert Tensor to RawImage
 * const output = outputs.reconstruction.squeeze().clamp_(0, 1).mul_(255).round_().to('uint8');
 * const outputImage = RawImage.fromTensor(output);
 * // RawImage {
 * //   data: Uint8Array(786432) [ 41, 31, 24, ... ],
 * //   width: 512,
 * //   height: 512,
 * //   channels: 3
 * // }
 * ```
 */
export class Swin2SRForImageSuperResolution extends Swin2SRPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// DPT models
export class DPTPreTrainedModel extends PreTrainedModel { }

/**
 * The bare DPT Model transformer outputting raw hidden-states without any specific head on top.
 */
export class DPTModel extends DPTPreTrainedModel { }

/**
 * DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
 *
 * **Example:** Depth estimation w/ `Xenova/dpt-hybrid-midas`.
 * ```javascript
 * import { DPTForDepthEstimation, AutoProcessor, RawImage, interpolate_4d } from '@huggingface/transformers';
 *
 * // Load model and processor
 * const model_id = 'Xenova/dpt-hybrid-midas';
 * const model = await DPTForDepthEstimation.from_pretrained(model_id);
 * const processor = await AutoProcessor.from_pretrained(model_id);
 *
 * // Load image from URL
 * const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
 * const image = await RawImage.read(url);
 *
 * // Prepare image for the model
 * const inputs = await processor(image);
 *
 * // Run model
 * const { predicted_depth } = await model(inputs);
 *
 * // Interpolate to original size
 * const prediction = (await interpolate_4d(predicted_depth.unsqueeze(1), {
 *   size: image.size.reverse(),
 *   mode: 'bilinear',
 * })).squeeze(1);
 *
 * // Visualize the prediction
 * const min = prediction.min().item();
 * const max = prediction.max().item();
 * const formatted = prediction.sub_(min).div_(max - min).mul_(255).to('uint8');
 * const depth = RawImage.fromTensor(formatted);
 * // RawImage {
 * //   data: Uint8Array(307200) [ 85, 85, 84, ... ],
 * //   width: 640,
 * //   height: 480,
 * //   channels: 1
 * // }
 * ```
 */
export class DPTForDepthEstimation extends DPTPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class DepthAnythingPreTrainedModel extends PreTrainedModel { }

/**
 * Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.
 */
export class DepthAnythingForDepthEstimation extends DepthAnythingPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class SapiensPreTrainedModel extends PreTrainedModel { }

/**
 * Sapiens model with a semantic segmentation head on top.
 */
export class SapiensForSemanticSegmentation extends SapiensPreTrainedModel { }

/**
 * Sapiens model with a depth estimation head on top.
 */
export class SapiensForDepthEstimation extends SapiensPreTrainedModel { }

/**
 * Sapiens model with a surface-normal estimation head on top.
 */
export class SapiensForNormalEstimation extends SapiensPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class DepthProPreTrainedModel extends PreTrainedModel { }

/**
 * DepthPro model with a depth estimation head on top.
 */
export class DepthProForDepthEstimation extends DepthProPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class Metric3DPreTrainedModel extends PreTrainedModel { }

/**
 * Metric3D model with a depth estimation head on top.
 */
export class Metric3DForDepthEstimation extends Metric3DPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class Metric3Dv2PreTrainedModel extends PreTrainedModel { }

/**
 * Metric3Dv2 model with a depth estimation head on top.
 */
export class Metric3Dv2ForDepthEstimation extends Metric3Dv2PreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class MaskFormerPreTrainedModel extends PreTrainedModel { }

/**
 * The bare MaskFormer model outputting raw hidden-states without any specific head on top.
 */
export class MaskFormerModel extends MaskFormerPreTrainedModel { }

/**
 * MaskFormer model with an instance segmentation head on top.
 */
export class MaskFormerForInstanceSegmentation extends MaskFormerPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class GLPNPreTrainedModel extends PreTrainedModel { }

/**
 * The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.
 */
export class GLPNModel extends GLPNPreTrainedModel { }

/**
 * GLPN Model with a lightweight depth estimation head on top e.g. for KITTI, NYUv2.
 *
 * **Example:** Depth estimation w/ `Xenova/glpn-kitti`.
 * ```javascript
 * import { GLPNForDepthEstimation, AutoProcessor, RawImage, interpolate_4d } from '@huggingface/transformers';
 *
 * // Load model and processor
 * const model_id = 'Xenova/glpn-kitti';
 * const model = await GLPNForDepthEstimation.from_pretrained(model_id);
 * const processor = await AutoProcessor.from_pretrained(model_id);
 *
 * // Load image from URL
 * const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';
 * const image = await RawImage.read(url);
 *
 * // Prepare image for the model
 * const inputs = await processor(image);
 *
 * // Run model
 * const { predicted_depth } = await model(inputs);
 *
 * // Interpolate to original size
 * const prediction = (await interpolate_4d(predicted_depth.unsqueeze(1), {
 *   size: image.size.reverse(),
 *   mode: 'bilinear',
 * })).squeeze(1);
 *
 * // Visualize the prediction
 * const min = prediction.min().item();
 * const max = prediction.max().item();
 * const formatted = prediction.sub_(min).div_(max - min).mul_(255).to('uint8');
 * const depth = RawImage.fromTensor(formatted);
 * // RawImage {
 * //   data: Uint8Array(307200) [ 85, 85, 84, ... ],
 * //   width: 640,
 * //   height: 480,
 * //   channels: 1
 * // }
 * ```
 */
export class GLPNForDepthEstimation extends GLPNPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class DonutSwinPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.
 *
 * **Example:** Step-by-step Document Parsing.
 *
 * ```javascript
 * import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@huggingface/transformers';
 *
 * // Choose model to use
 * const model_id = 'Xenova/donut-base-finetuned-cord-v2';
 *
 * // Prepare image inputs
 * const processor = await AutoProcessor.from_pretrained(model_id);
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/receipt.png';
 * const image = await RawImage.read(url);
 * const image_inputs = await processor(image);
 *
 * // Prepare decoder inputs
 * const tokenizer = await AutoTokenizer.from_pretrained(model_id);
 * const task_prompt = '<s_cord-v2>';
 * const decoder_input_ids = tokenizer(task_prompt, {
 *   add_special_tokens: false,
 * }).input_ids;
 *
 * // Create the model
 * const model = await AutoModelForVision2Seq.from_pretrained(model_id);
 *
 * // Run inference
 * const output = await model.generate(image_inputs.pixel_values, {
 *   decoder_input_ids,
 *   max_length: model.config.decoder.max_position_embeddings,
 * });
 *
 * // Decode output
 * const decoded = tokenizer.batch_decode(output)[0];
 * // <s_cord-v2><s_menu><s_nm> CINNAMON SUGAR</s_nm><s_unitprice> 17,000</s_unitprice><s_cnt> 1 x</s_cnt><s_price> 17,000</s_price></s_menu><s_sub_total><s_subtotal_price> 17,000</s_subtotal_price></s_sub_total><s_total><s_total_price> 17,000</s_total_price><s_cashprice> 20,000</s_cashprice><s_changeprice> 3,000</s_changeprice></s_total></s>
 * ```
 *
 * **Example:** Step-by-step Document Visual Question Answering (DocVQA)
 *
 * ```javascript
 * import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@huggingface/transformers';
 *
 * // Choose model to use
 * const model_id = 'Xenova/donut-base-finetuned-docvqa';
 *
 * // Prepare image inputs
 * const processor = await AutoProcessor.from_pretrained(model_id);
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/invoice.png';
 * const image = await RawImage.read(url);
 * const image_inputs = await processor(image);
 *
 * // Prepare decoder inputs
 * const tokenizer = await AutoTokenizer.from_pretrained(model_id);
 * const question = 'What is the invoice number?';
 * const task_prompt = `<s_docvqa><s_question>${question}</s_question><s_answer>`;
 * const decoder_input_ids = tokenizer(task_prompt, {
 *   add_special_tokens: false,
 * }).input_ids;
 *
 * // Create the model
 * const model = await AutoModelForVision2Seq.from_pretrained(model_id);
 *
 * // Run inference
 * const output = await model.generate(image_inputs.pixel_values, {
 *   decoder_input_ids,
 *   max_length: model.config.decoder.max_position_embeddings,
 * });
 *
 * // Decode output
 * const decoded = tokenizer.batch_decode(output)[0];
 * // <s_docvqa><s_question> What is the invoice number?</s_question><s_answer> us-001</s_answer></s>
 * ```
 */
export class DonutSwinModel extends DonutSwinPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class ConvNextPreTrainedModel extends PreTrainedModel { }

/**
 * The bare ConvNext model outputting raw features without any specific head on top.
 */
export class ConvNextModel extends ConvNextPreTrainedModel { }

/**
 * ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
 */
export class ConvNextForImageClassification extends ConvNextPreTrainedModel {
    /**
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The classification logits, wrapped in a `SequenceClassifierOutput`.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class ConvNextV2PreTrainedModel extends PreTrainedModel { }

/**
 * The bare ConvNextV2 model outputting raw features without any specific head on top.
 */
export class ConvNextV2Model extends ConvNextV2PreTrainedModel { }

/**
 * ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.
 */
export class ConvNextV2ForImageClassification extends ConvNextV2PreTrainedModel {
    /**
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The classification logits, wrapped in a `SequenceClassifierOutput`.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class Dinov2PreTrainedModel extends PreTrainedModel { }

/**
 * The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.
 */
export class Dinov2Model extends Dinov2PreTrainedModel { }

/**
 * Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.
 */
export class Dinov2ForImageClassification extends Dinov2PreTrainedModel {
    /**
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The classification logits, wrapped in a `SequenceClassifierOutput`.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class Dinov2WithRegistersPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Dinov2WithRegisters Model transformer outputting raw hidden-states without any specific head on top.
 */
export class Dinov2WithRegistersModel extends Dinov2WithRegistersPreTrainedModel { }

/**
 * Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet.
 */
export class Dinov2WithRegistersForImageClassification extends Dinov2WithRegistersPreTrainedModel {
    /**
     * @param {any} model_inputs
     * @returns {Promise<SequenceClassifierOutput>} The classification logits, wrapped in a `SequenceClassifierOutput`.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class DINOv3ViTPreTrainedModel extends PreTrainedModel { }

/**
 * The bare DINOv3 ViT model outputting raw hidden-states without any specific head on top.
 */
export class DINOv3ViTModel extends DINOv3ViTPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class DINOv3ConvNextPreTrainedModel extends PreTrainedModel { }

/**
 * The bare DINOv3 ConvNext model outputting raw features without any specific head on top.
 */
export class DINOv3ConvNextModel extends DINOv3ConvNextPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class GroundingDinoPreTrainedModel extends PreTrainedModel { }

/**
 * GroundingDino model with an object detection head on top.
 */
export class GroundingDinoForObjectDetection extends GroundingDinoPreTrainedModel { }
//////////////////////////////////////////////////
export class YolosPreTrainedModel extends PreTrainedModel { }

export class YolosModel extends YolosPreTrainedModel { }

/**
 * YOLOS model with an object detection head on top.
 */
export class YolosForObjectDetection extends YolosPreTrainedModel {
    /**
     * @param {any} model_inputs
     * @returns {Promise<YolosObjectDetectionOutput>} The detection logits and predicted boxes, wrapped in a `YolosObjectDetectionOutput`.
     */
    async _call(model_inputs) {
        return new YolosObjectDetectionOutput(await super._call(model_inputs));
    }
}

export class YolosObjectDetectionOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Classification logits (including no-object) for all queries.
     * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height).
     * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding).
     */
    constructor({ logits, pred_boxes }) {
        super();
        this.logits = logits;
        this.pred_boxes = pred_boxes;
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class SamPreTrainedModel extends PreTrainedModel { }

/**
 * Segment Anything Model (SAM) for generating segmentation masks, given an input image
 * and optional 2D location and bounding boxes.
 *
 * **Example:** Perform mask generation w/ `Xenova/sam-vit-base`.
 * ```javascript
 * import { SamModel, AutoProcessor, RawImage } from '@huggingface/transformers';
 *
 * const model = await SamModel.from_pretrained('Xenova/sam-vit-base');
 * const processor = await AutoProcessor.from_pretrained('Xenova/sam-vit-base');
 *
 * const img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png';
 * const raw_image = await RawImage.read(img_url);
 * const input_points = [[[450, 600]]] // 2D localization of a window
 *
 * const inputs = await processor(raw_image, { input_points });
 * const outputs = await model(inputs);
 *
 * const masks = await processor.post_process_masks(outputs.pred_masks, inputs.original_sizes, inputs.reshaped_input_sizes);
 * // [
 * //   Tensor {
 * //     dims: [ 1, 3, 1764, 2646 ],
 * //     type: 'bool',
 * //     data: Uint8Array(14002632) [ ... ],
 * //     size: 14002632
 * //   }
 * // ]
 * const scores = outputs.iou_scores;
 * // Tensor {
 * //   dims: [ 1, 1, 3 ],
 * //   type: 'float32',
 * //   data: Float32Array(3) [
 * //     0.8892380595207214,
 * //     0.9311248064041138,
 * //     0.983696699142456
 * //   ],
 * //   size: 3
 * // }
 * ```
 */
export class SamModel extends SamPreTrainedModel {
    /**
     * Compute image embeddings and positional image embeddings, given the pixel values of an image.
     * @param {Object} model_inputs Object containing the model inputs.
     * @param {Tensor} model_inputs.pixel_values Pixel values obtained using a `SamProcessor`.
     * @returns {Promise<{ image_embeddings: Tensor, image_positional_embeddings: Tensor }>} The image embeddings and positional image embeddings.
     */
    async get_image_embeddings({ pixel_values }) {
        // in:
        //  - pixel_values: tensor.float32[batch_size,3,1024,1024]
        //
        // out:
        //  - image_embeddings: tensor.float32[batch_size,256,64,64]
        //  - image_positional_embeddings: tensor.float32[batch_size,256,64,64]
        return await encoderForward(this, { pixel_values })
    }

    /**
     * @typedef {Object} SamModelInputs Object containing the model inputs.
     * @property {Tensor} pixel_values Pixel values as a Tensor with shape `(batch_size, num_channels, height, width)`.
     * These can be obtained using a `SamProcessor`.
     * @property {Tensor} [input_points] Input 2D spatial points with shape `(batch_size, num_points, 2)`.
     * This is used by the prompt encoder to encode the prompt.
     * @property {Tensor} [input_labels] Input labels for the points, as a Tensor of shape `(batch_size, point_batch_size, num_points)`.
     * This is used by the prompt encoder to encode the prompt. There are 4 types of labels:
     *  - `1`: the point is a point that contains the object of interest
     *  - `0`: the point is a point that does not contain the object of interest
     *  - `-1`: the point corresponds to the background
     *  - `-10`: the point is a padding point, thus should be ignored by the prompt encoder
     * @property {Tensor} [input_boxes] Input bounding boxes with shape `(batch_size, num_boxes, 4)`.
     * @property {Tensor} [image_embeddings] Image embeddings used by the mask decoder.
     * @property {Tensor} [image_positional_embeddings] Image positional embeddings used by the mask decoder.
     */

    /**
     * @param {SamModelInputs} model_inputs Object containing the model inputs.
     * @returns {Promise<Object>} The output of the model.
     */
    async forward(model_inputs) {
        if (!model_inputs.image_embeddings || !model_inputs.image_positional_embeddings) {
            // Compute the image embeddings if they are missing
            model_inputs = {
                ...model_inputs,
                ...(await this.get_image_embeddings(model_inputs))
            }
        }

        if (!model_inputs.input_labels && model_inputs.input_points) {
            // Set default input labels if they are missing
            // (filled with 1n, i.e. every provided point is treated as containing the object of interest)
            const shape = model_inputs.input_points.dims.slice(0, -1);
            const numElements = shape.reduce((a, b) => a * b, 1);
            model_inputs.input_labels = new Tensor(
                'int64',
                new BigInt64Array(numElements).fill(1n),
                shape
            );
        }

        const decoder_inputs = {
            image_embeddings: model_inputs.image_embeddings,
            image_positional_embeddings: model_inputs.image_positional_embeddings,
        };
        if (model_inputs.input_points) {
            decoder_inputs.input_points = model_inputs.input_points;
        }
        if (model_inputs.input_labels) {
            decoder_inputs.input_labels = model_inputs.input_labels;
        }
        if (model_inputs.input_boxes) {
            decoder_inputs.input_boxes = model_inputs.input_boxes;
        }

        // Returns:
        //  - iou_scores: tensor.float32[batch_size,point_batch_size,3]
        //  - pred_masks: tensor.float32[batch_size,point_batch_size,3,256,256]
        return await sessionRun(this.sessions['prompt_encoder_mask_decoder'], decoder_inputs);
    }

    /**
     * Runs the model with the provided inputs
     * @param {Object} model_inputs Model inputs
     * @returns {Promise<SamImageSegmentationOutput>} Object containing segmentation outputs
     */
    async _call(model_inputs) {
        return new SamImageSegmentationOutput(await super._call(model_inputs));
    }
}

/**
 * Base class for Segment-Anything model's output.
 */
export class SamImageSegmentationOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.iou_scores The output logits of the model.
     * @param {Tensor} output.pred_masks The predicted segmentation masks.
     */
    constructor({ iou_scores, pred_masks }) {
        super();
        this.iou_scores = iou_scores;
        this.pred_masks = pred_masks;
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// MarianMT models
export class MarianPreTrainedModel extends PreTrainedModel { };

export class MarianModel extends MarianPreTrainedModel { }

/**
 * MarianMT model, used for machine translation.
 */
export class MarianMTModel extends MarianPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// M2M100 models
export class M2M100PreTrainedModel extends PreTrainedModel { };

export class M2M100Model extends M2M100PreTrainedModel { }

/**
 * M2M100 model for conditional generation.
 */
export class M2M100ForConditionalGeneration extends M2M100PreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Wav2Vec2 models
export class Wav2Vec2PreTrainedModel extends PreTrainedModel { };

/**
 * The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.
 *
 * **Example:** Load and run a `Wav2Vec2Model` for feature extraction.
 *
 * ```javascript
 * import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers';
 *
 * // Read and preprocess audio
 * const processor = await AutoProcessor.from_pretrained('Xenova/mms-300m');
 * const audio = await read_audio('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000);
 * const inputs = await processor(audio);
 *
 * // Run model with inputs
 * const model = await AutoModel.from_pretrained('Xenova/mms-300m');
 * const output = await model(inputs);
 * // {
 * //   last_hidden_state: Tensor {
 * //     dims: [ 1, 1144, 1024 ],
 * //     type: 'float32',
 * //     data: Float32Array(1171456) [ ... ],
 * //     size: 1171456
 * //   }
 * // }
 * ```
 */
export class Wav2Vec2Model extends Wav2Vec2PreTrainedModel { }

/**
 * Wav2Vec2 Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
 */
export class Wav2Vec2ForCTC extends Wav2Vec2PreTrainedModel {
    /**
     * @param {Object} model_inputs
     * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.
     * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1]
     * @returns {Promise<CausalLMOutput>} The CTC logits, wrapped in a `CausalLMOutput`.
     */
    async _call(model_inputs) {
        return new CausalLMOutput(await super._call(model_inputs));
    }
}

/**
 * Wav2Vec2 Model with a sequence classification head on top (a linear layer over the pooled output).
 */
export class Wav2Vec2ForSequenceClassification extends Wav2Vec2PreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization.
 */
export class Wav2Vec2ForAudioFrameClassification extends Wav2Vec2PreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// PyAnnote models
export class PyAnnotePreTrainedModel extends PreTrainedModel { };

/**
 * The bare PyAnnote Model transformer outputting raw hidden-states without any specific head on top.
 */
export class PyAnnoteModel extends PyAnnotePreTrainedModel { }

/**
 * PyAnnote Model with a frame classification head on top for tasks like Speaker Diarization.
 *
 * **Example:** Load and run a `PyAnnoteForAudioFrameClassification` for speaker diarization.
 *
 * ```javascript
 * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';
 *
 * // Load model and processor
 * const model_id = 'onnx-community/pyannote-segmentation-3.0';
 * const model = await AutoModelForAudioFrameClassification.from_pretrained(model_id);
 * const processor = await AutoProcessor.from_pretrained(model_id);
 *
 * // Read and preprocess audio
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/mlk.wav';
 * const audio = await read_audio(url, processor.feature_extractor.config.sampling_rate);
 * const inputs = await processor(audio);
 *
 * // Run model with inputs
 * const { logits } = await model(inputs);
 * // {
 * //   logits: Tensor {
 * //     dims: [ 1, 767, 7 ],  // [batch_size, num_frames, num_classes]
 * //     type: 'float32',
 * //     data: Float32Array(5369) [ ... ],
 * //     size: 5369
 * //   }
 * // }
 *
 * const result = processor.post_process_speaker_diarization(logits, audio.length);
 * // [
 * //   [
 * //     { id: 0, start: 0, end: 1.0512535626298245, confidence: 0.8220156481664611 },
 * //     { id: 2, start: 1.0512535626298245, end: 2.3398869619825127, confidence: 0.9008811707860472 },
 * //     ...
 * //   ]
 * // ]
 *
 * // Display result
 * console.table(result[0], ['start', 'end', 'id', 'confidence']);
 * // ┌─────────┬────────────────────┬────────────────────┬────┬─────────────────────┐
 * // │ (index) │ start │ end │ id │ confidence │
 * // ├─────────┼────────────────────┼────────────────────┼────┼─────────────────────┤
 * // │ 0 │ 0 │ 1.0512535626298245 │ 0 │ 0.8220156481664611 │
 * // │ 1 │ 1.0512535626298245 │ 2.3398869619825127 │ 2 │ 0.9008811707860472 │
 * // │ 2 │ 2.3398869619825127 │ 3.5946089560890773 │ 0 │ 0.7521651315796233 │
 * // │ 3 │ 3.5946089560890773 │ 4.578039708226655 │ 2 │ 0.8491978128022479 │
 * // │ 4 │ 4.578039708226655 │ 4.594995410849717 │ 0 │ 0.2935352600416393 │
 * // │ 5 │ 4.594995410849717 │ 6.121008646925269 │ 3 │ 0.6788051309866024 │
 * // │ 6 │ 6.121008646925269 │ 6.256654267909762 │ 0 │ 0.37125512393851134 │
 * // │ 7 │ 6.256654267909762 │ 8.630452635138397 │ 2 │ 0.7467035186353542 │
 * // │ 8 │ 8.630452635138397 │ 10.088643060721703 │ 0 │ 0.7689364814666032 │
 * // │ 9 │ 10.088643060721703 │ 12.58113134631177 │ 2 │ 0.9123324509131324 │
 * // │ 10 │ 12.58113134631177 │ 13.005023911888312 │ 0 │ 0.4828358177572041 │
 * // └─────────┴────────────────────┴────────────────────┴────┴─────────────────────┘
 * ```
 */
export class PyAnnoteForAudioFrameClassification extends PyAnnotePreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// WeSpeakerResNet models
export class WeSpeakerResNetPreTrainedModel extends PreTrainedModel { };

/**
 * The bare WeSpeakerResNet model outputting raw hidden-states without any specific head on top.
 */
export class WeSpeakerResNetModel extends WeSpeakerResNetPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// UniSpeech models
export class UniSpeechPreTrainedModel extends PreTrainedModel { };

/**
 * The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.
 */
export class UniSpeechModel extends UniSpeechPreTrainedModel { }

/**
 * UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
 */
export class UniSpeechForCTC extends UniSpeechPreTrainedModel {
    /**
     * @param {Object} model_inputs
     * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.
     * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1]
     * @returns {Promise<CausalLMOutput>} The CTC logits, wrapped in a `CausalLMOutput`.
     */
    async _call(model_inputs) {
        return new CausalLMOutput(await super._call(model_inputs));
    }
}

/**
 * UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output).
 */
export class UniSpeechForSequenceClassification extends UniSpeechPreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// UniSpeechSat models
export class UniSpeechSatPreTrainedModel extends PreTrainedModel { };

/**
 * The bare UniSpeechSat Model transformer outputting raw hidden-states without any specific head on top.
 */
export class UniSpeechSatModel extends UniSpeechSatPreTrainedModel { }

/**
 * UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
 */
export class UniSpeechSatForCTC extends UniSpeechSatPreTrainedModel {
    /**
     * @param {Object} model_inputs
     * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.
     * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1]
     * @returns {Promise<CausalLMOutput>} The CTC logits, wrapped in a `CausalLMOutput`.
     */
    async _call(model_inputs) {
        return new CausalLMOutput(await super._call(model_inputs));
    }
}

/**
 * UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output).
 */
export class UniSpeechSatForSequenceClassification extends UniSpeechSatPreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * UniSpeechSat Model with a frame classification head on top for tasks like Speaker Diarization.
 */
export class UniSpeechSatForAudioFrameClassification extends UniSpeechSatPreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new TokenClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Wav2Vec2Bert models
export class Wav2Vec2BertPreTrainedModel extends PreTrainedModel { };

/**
 * The bare Wav2Vec2Bert Model transformer outputting raw hidden-states without any specific head on top.
 */
export class Wav2Vec2BertModel extends Wav2Vec2BertPreTrainedModel { }

/**
 * Wav2Vec2Bert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
 */
export class Wav2Vec2BertForCTC extends Wav2Vec2BertPreTrainedModel {
    /**
     * @param {Object} model_inputs
     * @param {Tensor} model_inputs.input_features Float values of input mel-spectrogram.
     * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1]
     * @returns {Promise<CausalLMOutput>} The CTC logits, wrapped in a `CausalLMOutput`.
     */
    async _call(model_inputs) {
        return new CausalLMOutput(await super._call(model_inputs));
    }
}

/**
 * Wav2Vec2Bert Model with a sequence classification head on top (a linear layer over the pooled output).
 */
export class Wav2Vec2BertForSequenceClassification extends Wav2Vec2BertPreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Hubert models
export class HubertPreTrainedModel extends PreTrainedModel { }

/**
 * The bare Hubert Model transformer outputting raw hidden-states without any specific head on top.
 *
 * **Example:** Load and run a `HubertModel` for feature extraction.
 *
 * ```javascript
 * import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers';
 *
 * // Read and preprocess audio
 * const processor = await AutoProcessor.from_pretrained('Xenova/hubert-base-ls960');
 * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);
 * const inputs = await processor(audio);
 *
 * // Load and run model with inputs
 * const model = await AutoModel.from_pretrained('Xenova/hubert-base-ls960');
 * const output = await model(inputs);
 * // {
 * //   last_hidden_state: Tensor {
 * //     dims: [ 1, 549, 768 ],
 * //     type: 'float32',
 * //     data: Float32Array(421632) [0.0682469978928566, 0.08104046434164047, -0.4975186586380005, ...],
 * //     size: 421632
 * //   }
 * // }
 * ```
 */
// NOTE(review): the Hubert classes below extend Wav2Vec2PreTrainedModel rather than
// HubertPreTrainedModel (which is declared above but otherwise unused). Both bases are
// empty subclasses of PreTrainedModel, so behavior is the same — confirm this is intentional.
export class HubertModel extends Wav2Vec2PreTrainedModel { }

/**
 * Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
 */
export class HubertForCTC extends Wav2Vec2PreTrainedModel {
    /**
     * @param {Object} model_inputs
     * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.
     * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1]
     * @returns {Promise<CausalLMOutput>} The CTC logits, wrapped in a `CausalLMOutput`.
     */
    async _call(model_inputs) {
        return new CausalLMOutput(await super._call(model_inputs));
    }
}

/**
 * Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.
 */
export class HubertForSequenceClassification extends Wav2Vec2PreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// WavLM models
/**
 * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
 */
export class WavLMPreTrainedModel extends PreTrainedModel { };

/**
 * The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.
 *
 * **Example:** Load and run a `WavLMModel` for feature extraction.
 *
 * ```javascript
 * import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers';
 *
 * // Read and preprocess audio
 * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base');
 * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);
 * const inputs = await processor(audio);
 *
 * // Run model with inputs
 * const model = await AutoModel.from_pretrained('Xenova/wavlm-base');
 * const output = await model(inputs);
 * // {
 * //   last_hidden_state: Tensor {
 * //     dims: [ 1, 549, 768 ],
 * //     type: 'float32',
 * //     data: Float32Array(421632) [-0.349443256855011, -0.39341306686401367, 0.022836603224277496, ...],
 * //     size: 421632
 * //   }
 * // }
 * ```
 */
export class WavLMModel extends WavLMPreTrainedModel { }

/**
 * WavLM Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
 */
export class WavLMForCTC extends WavLMPreTrainedModel {
    /**
     * @param {Object} model_inputs
     * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.
     * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1]
     * @returns {Promise<CausalLMOutput>} The CTC logits, wrapped in a `CausalLMOutput`.
     */
    async _call(model_inputs) {
        return new CausalLMOutput(await super._call(model_inputs));
    }
}

/**
 * WavLM Model with a sequence classification head on top (a linear layer over the pooled output).
 */
export class WavLMForSequenceClassification extends WavLMPreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
     */
    async _call(model_inputs) {
        return new SequenceClassifierOutput(await super._call(model_inputs));
    }
}

/**
 * WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.
 *
 * **Example:** Extract speaker embeddings with `WavLMForXVector`.
 * ```javascript
 * import { AutoProcessor, AutoModel, read_audio } from '@huggingface/transformers';
 *
 * // Read and preprocess audio
 * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base-plus-sv');
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
 * const audio = await read_audio(url, 16000);
 * const inputs = await processor(audio);
 *
 * // Run model with inputs
 * const model = await AutoModel.from_pretrained('Xenova/wavlm-base-plus-sv');
 * const outputs = await model(inputs);
 * // {
 * //   logits: Tensor {
 * //     dims: [ 1, 512 ],
 * //     type: 'float32',
 * //     data: Float32Array(512) [0.5847219228744507, ...],
 * //     size: 512
 * //   },
 * //   embeddings: Tensor {
 * //     dims: [ 1, 512 ],
 * //     type: 'float32',
 * //     data: Float32Array(512) [-0.09079201519489288, ...],
 * //     size: 512
 * //   }
 * // }
 * ```
 */
export class WavLMForXVector extends WavLMPreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<XVectorOutput>} An object containing the model's output logits and speaker embeddings.
     */
    async _call(model_inputs) {
        return new XVectorOutput(await super._call(model_inputs));
    }
}

/**
 * WavLM Model with a frame classification head on top for tasks like Speaker Diarization.
 *
 * **Example:** Perform speaker diarization with `WavLMForAudioFrameClassification`.
 * ```javascript
 * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';
 *
 * // Read and preprocess audio
 * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base-plus-sd');
 * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';
 * const audio = await read_audio(url, 16000);
 * const inputs = await processor(audio);
 *
 * // Run model with inputs
 * const model = await AutoModelForAudioFrameClassification.from_pretrained('Xenova/wavlm-base-plus-sd');
 * const { logits } = await model(inputs);
 * // {
 * //   logits: Tensor {
 * //     dims: [ 1, 549, 2 ],  // [batch_size, num_frames, num_speakers]
 * //     type: 'float32',
 * //     data: Float32Array(1098) [-3.5301010608673096, ...],
 * //     size: 1098
 * //   }
 * // }
 *
 * const labels = logits[0].sigmoid().tolist().map(
 *     frames => frames.map(speaker => speaker > 0.5 ? 1 : 0)
 * );
 * console.log(labels); // labels is a one-hot array of shape (num_frames, num_speakers)
 * // [
 * //     [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
 * //     [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],
 * //     [0, 0], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1],
 * //     ...
 * // ]
 * ```
 */
export class WavLMForAudioFrameClassification extends WavLMPreTrainedModel {
    /**
     * Calls the model on new inputs.
     * @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for sequence classification.
*/ async _call(model_inputs) { return new TokenClassifierOutput(await super._call(model_inputs)); } } export class StyleTextToSpeech2PreTrainedModel extends PreTrainedModel { } export class StyleTextToSpeech2Model extends StyleTextToSpeech2PreTrainedModel { } ////////////////////////////////////////////////// // SpeechT5 models /** * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. */ export class SpeechT5PreTrainedModel extends PreTrainedModel { }; /** * The bare SpeechT5 Encoder-Decoder Model outputting raw hidden-states without any specific pre- or post-nets. */ export class SpeechT5Model extends SpeechT5PreTrainedModel { }; /** * SpeechT5 Model with a speech encoder and a text decoder. * * **Example:** Generate speech from text with `SpeechT5ForSpeechToText`. * ```javascript * import { AutoTokenizer, AutoProcessor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, Tensor } from '@huggingface/transformers'; * * // Load the tokenizer and processor * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/speecht5_tts'); * const processor = await AutoProcessor.from_pretrained('Xenova/speecht5_tts'); * * // Load the models * // NOTE: We use the full-precision versions as they are more accurate * const model = await SpeechT5ForTextToSpeech.from_pretrained('Xenova/speecht5_tts', { dtype: 'fp32' }); * const vocoder = await SpeechT5HifiGan.from_pretrained('Xenova/speecht5_hifigan', { dtype: 'fp32' }); * * // Load speaker embeddings from URL * const speaker_embeddings_data = new Float32Array( * await (await fetch('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/speaker_embeddings.bin')).arrayBuffer() * ); * const speaker_embeddings = new Tensor( * 'float32', * speaker_embeddings_data, * [1, speaker_embeddings_data.length] * ) * * // Run tokenization * const { input_ids } = tokenizer('Hello, my dog is cute'); * * // Generate waveform * const { waveform } = await 
model.generate_speech(input_ids, speaker_embeddings, { vocoder }); * console.log(waveform) * // Tensor { * // dims: [ 26112 ], * // type: 'float32', * // size: 26112, * // data: Float32Array(26112) [ -0.00043630177970044315, -0.00018082228780258447, ... ], * // } * ``` */ export class SpeechT5ForSpeechToText extends SpeechT5PreTrainedModel { } /** * SpeechT5 Model with a text encoder and a speech decoder. */ export class SpeechT5ForTextToSpeech extends SpeechT5PreTrainedModel { /** * @typedef {Object} SpeechOutput * @property {Tensor} [spectrogram] The predicted log-mel spectrogram of shape * `(output_sequence_length, config.num_mel_bins)`. Returned when no `vocoder` is provided * @property {Tensor} [waveform] The predicted waveform of shape `(num_frames,)`. Returned when a `vocoder` is provided. * @property {Tensor} [cross_attentions] The outputs of the decoder's cross-attention layers of shape * `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)`. returned when `output_cross_attentions` is `true`. */ /** * Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a speech waveform using a vocoder. * @param {Tensor} input_values Indices of input sequence tokens in the vocabulary. * @param {Tensor} speaker_embeddings Tensor containing the speaker embeddings. * @param {Object} options Optional parameters for generating speech. * @param {number} [options.threshold=0.5] The generated sequence ends when the predicted stop token probability exceeds this value. * @param {number} [options.minlenratio=0.0] Used to calculate the minimum required length for the output sequence. * @param {number} [options.maxlenratio=20.0] Used to calculate the maximum allowed length for the output sequence. * @param {Object} [options.vocoder=null] The vocoder that converts the mel spectrogram into a speech waveform. If `null`, the output is the mel spectrogram. 
 * @param {boolean} [options.output_cross_attentions=false] Whether or not to return the attentions tensors of the decoder's cross-attention layers.
     * @returns {Promise<SpeechOutput>} A promise which resolves to an object containing the spectrogram, waveform, and cross-attention tensors.
     */
    async generate_speech(input_values, speaker_embeddings, {
        threshold = 0.5,
        minlenratio = 0.0,
        maxlenratio = 20.0,
        vocoder = null,
        // output_cross_attentions = false, // TODO add
    } = {}) {

        const model_inputs = {
            input_ids: input_values
        }

        // Encode the input token ids once; the decoder attends to these hidden states at every step.
        const { encoder_outputs, encoder_attention_mask } = await encoderForward(this, model_inputs);

        // Min/max output lengths are expressed relative to the encoder sequence length,
        // scaled by the model's reduction factor.
        // @ts-expect-error TS2339
        const r = encoder_outputs.dims[1] / this.config.reduction_factor;
        const maxlen = Math.floor(r * maxlenratio);
        const minlen = Math.floor(r * minlenratio);

        // @ts-expect-error TS2339
        const num_mel_bins = this.config.num_mel_bins;

        let spectrogramParts = [];
        let past_key_values = null;
        let decoder_outputs = null;
        let idx = 0;

        // Autoregressive decoding loop: each iteration predicts the next mel-spectrogram chunk.
        while (true) {
            ++idx;

            // After the first iteration, decoder_outputs is set and cached key/values can be reused.
            const use_cache_branch = boolTensor(!!decoder_outputs);
            let output_sequence;
            if (decoder_outputs) {
                output_sequence = decoder_outputs.output_sequence_out;
            } else {
                // First step: seed the decoder with a single all-zero mel frame.
                output_sequence = new Tensor(
                    'float32',
                    new Float32Array(num_mel_bins),
                    [1, 1, num_mel_bins],
                )
            }
            let decoderFeeds = {
                use_cache_branch,
                output_sequence,
                encoder_attention_mask: encoder_attention_mask,
                speaker_embeddings: speaker_embeddings,
                encoder_hidden_states: encoder_outputs,
            };

            this.addPastKeyValues(decoderFeeds, past_key_values);
            decoder_outputs = await sessionRun(this.sessions['decoder_model_merged'], decoderFeeds);
            past_key_values = this.getPastKeyValues(decoder_outputs, past_key_values);

            // `prob` holds the predicted stop probabilities; `spectrum` the predicted mel frames.
            const { prob, spectrum } = decoder_outputs;
            spectrogramParts.push(spectrum);

            if (idx >= minlen && (
                // Finished when stop token or maximum length is reached.
Array.from(prob.data).filter(p => p >= threshold).length > 0 || idx >= maxlen )) { break; } } const spectrogram = cat(spectrogramParts); const { waveform } = await sessionRun(vocoder.sessions['model'], { spectrogram }); return { spectrogram, waveform, // cross_attentions: null, // TODO add } } } /** * HiFi-GAN vocoder. * * See [SpeechT5ForSpeechToText](./models#module_models.SpeechT5ForSpeechToText) for example usage. */ export class SpeechT5HifiGan extends PreTrainedModel { main_input_name = 'spectrogram'; } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // TrOCR models export class TrOCRPreTrainedModel extends PreTrainedModel { } /** * The TrOCR Decoder with a language modeling head. */ export class TrOCRForCausalLM extends TrOCRPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Mistral models /** * The bare Mistral Model outputting raw hidden-states without any specific head on top. */ export class MistralPreTrainedModel extends PreTrainedModel { } export class MistralModel extends MistralPreTrainedModel { } export class MistralForCausalLM extends MistralPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // ERNIE-4.5 models export class Ernie4_5_PretrainedModel extends PreTrainedModel { } export class Ernie4_5_Model extends Ernie4_5_PretrainedModel { } export class Ernie4_5_ForCausalLM extends Ernie4_5_PretrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Starcoder2 models /** * The bare Starcoder2 Model outputting raw hidden-states without any specific head on top. 
*/ export class Starcoder2PreTrainedModel extends PreTrainedModel { } export class Starcoder2Model extends Starcoder2PreTrainedModel { } export class Starcoder2ForCausalLM extends Starcoder2PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Falcon models /** * The bare Falcon Model outputting raw hidden-states without any specific head on top. */ export class FalconPreTrainedModel extends PreTrainedModel { } export class FalconModel extends FalconPreTrainedModel { } export class FalconForCausalLM extends FalconPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // CLAP models export class ClapPreTrainedModel extends PreTrainedModel { } export class ClapModel extends ClapPreTrainedModel { } /** * CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output). * * **Example:** Compute text embeddings with `ClapTextModelWithProjection`. * * ```javascript * import { AutoTokenizer, ClapTextModelWithProjection } from '@huggingface/transformers'; * * // Load tokenizer and text model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clap-htsat-unfused'); * const text_model = await ClapTextModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused'); * * // Run tokenization * const texts = ['a sound of a cat', 'a sound of a dog']; * const text_inputs = tokenizer(texts, { padding: true, truncation: true }); * * // Compute embeddings * const { text_embeds } = await text_model(text_inputs); * // Tensor { * // dims: [ 2, 512 ], * // type: 'float32', * // data: Float32Array(1024) [ ... 
], * // size: 1024 * // } * ``` */ export class ClapTextModelWithProjection extends ClapPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 'text_model', }); } } /** * CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output). * * **Example:** Compute audio embeddings with `ClapAudioModelWithProjection`. * * ```javascript * import { AutoProcessor, ClapAudioModelWithProjection, read_audio } from '@huggingface/transformers'; * * // Load processor and audio model * const processor = await AutoProcessor.from_pretrained('Xenova/clap-htsat-unfused'); * const audio_model = await ClapAudioModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused'); * * // Read audio and run processor * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cat_meow.wav'); * const audio_inputs = await processor(audio); * * // Compute embeddings * const { audio_embeds } = await audio_model(audio_inputs); * // Tensor { * // dims: [ 1, 512 ], * // type: 'float32', * // data: Float32Array(512) [ ... ], * // size: 512 * // } * ``` */ export class ClapAudioModelWithProjection extends ClapPreTrainedModel { /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { return super.from_pretrained(pretrained_model_name_or_path, { ...options, // Update default model file name if not provided model_file_name: options.model_file_name ?? 
'audio_model', }); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // VITS models export class VitsPreTrainedModel extends PreTrainedModel { } /** * The complete VITS model, for text-to-speech synthesis. * * **Example:** Generate speech from text with `VitsModel`. * ```javascript * import { AutoTokenizer, VitsModel } from '@huggingface/transformers'; * * // Load the tokenizer and model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/mms-tts-eng'); * const model = await VitsModel.from_pretrained('Xenova/mms-tts-eng'); * * // Run tokenization * const inputs = tokenizer('I love transformers'); * * // Generate waveform * const { waveform } = await model(inputs); * // Tensor { * // dims: [ 1, 35328 ], * // type: 'float32', * // data: Float32Array(35328) [ ... ], * // size: 35328, * // } * ``` */ export class VitsModel extends VitsPreTrainedModel { /** * Calls the model on new inputs. * @param {Object} model_inputs The inputs to the model. * @returns {Promise<VitsModelOutput>} The outputs for the VITS model. */ async _call(model_inputs) { return new VitsModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Segformer models export class SegformerPreTrainedModel extends PreTrainedModel { } /** * The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top. */ export class SegformerModel extends SegformerPreTrainedModel { } /** * SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet. */ export class SegformerForImageClassification extends SegformerPreTrainedModel { } /** * SegFormer Model transformer with an all-MLP decode head on top e.g. for ADE20k, CityScapes. 
*/ export class SegformerForSemanticSegmentation extends SegformerPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // StableLm models export class StableLmPreTrainedModel extends PreTrainedModel { } /** * The bare StableLm Model transformer outputting raw hidden-states without any specific head on top. */ export class StableLmModel extends StableLmPreTrainedModel { } /** * StableLm Model with a `language modeling` head on top for Causal Language Modeling (with past). */ export class StableLmForCausalLM extends StableLmPreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// export class EfficientNetPreTrainedModel extends PreTrainedModel { } /** * The bare EfficientNet model outputting raw features without any specific head on top. */ export class EfficientNetModel extends EfficientNetPreTrainedModel { } /** * EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features). */ export class EfficientNetForImageClassification extends EfficientNetPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Musicgen models export class MusicgenPreTrainedModel extends PreTrainedModel { } /** * The bare Musicgen decoder model outputting raw hidden-states without any specific head on top. */ export class MusicgenModel extends MusicgenPreTrainedModel { } /** * The MusicGen decoder model with a language modelling head on top. */ export class MusicgenForCausalLM extends MusicgenPreTrainedModel { } /** * The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder, * for music generation tasks with one or both of text and audio prompts. 
* * **Example:** Generate music from text with `Xenova/musicgen-small`. * ```javascript * import { AutoTokenizer, MusicgenForConditionalGeneration } from '@huggingface/transformers'; * * // Load tokenizer and model * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/musicgen-small'); * const model = await MusicgenForConditionalGeneration.from_pretrained( * 'Xenova/musicgen-small', { dtype: 'fp32' } * ); * * // Prepare text input * const prompt = '80s pop track with bassy drums and synth'; * const inputs = tokenizer(prompt); * * // Generate audio * const audio_values = await model.generate({ * ...inputs, * max_new_tokens: 512, * do_sample: true, * guidance_scale: 3, * }); * * // (Optional) Write the output to a WAV file * import wavefile from 'wavefile'; * import fs from 'fs'; * * const wav = new wavefile.WaveFile(); * wav.fromScratch(1, model.config.audio_encoder.sampling_rate, '32f', audio_values.data); * fs.writeFileSync('musicgen_out.wav', wav.toBuffer()); * ``` */ export class MusicgenForConditionalGeneration extends PreTrainedModel { // NOTE: not MusicgenPreTrainedModel forward_params = [ 'input_ids', 'attention_mask', 'encoder_outputs', 'decoder_input_ids', 'decoder_attention_mask', 'past_key_values', ]; /** * Apply the pattern mask to the final ids, * then revert the pattern delay mask by filtering the pad token id in a single step. * @param {Tensor} outputs The output tensor from the model. * @returns {Tensor} The filtered output tensor. 
*/
    _apply_and_filter_by_delay_pattern_mask(outputs) {
        const [bs_x_codebooks, seqLength] = outputs.dims;
        // @ts-expect-error TS2339
        const num_codebooks = this.config.decoder.num_codebooks;
        const upperBound = (seqLength - num_codebooks);

        // Compact `outputs.data` in place: keep only tokens that are not padding and
        // whose position lies inside the valid (un-delayed) region for their codebook.
        let newDataSize = 0;
        for (let i = 0; i < outputs.size; ++i) {
            // @ts-expect-error TS2339
            if (outputs.data[i] === this.config.decoder.pad_token_id) {
                continue;
            }

            // `row` = position within the sequence; `col` = codebook index of this row.
            const row = (i % seqLength);
            const col = Math.floor(i / seqLength) % num_codebooks;

            // Undo the per-codebook delay: codebook `col` starts `col` steps late.
            const diff = row - col;
            if (diff > 0 && diff <= upperBound) {
                outputs.data[newDataSize++] = outputs.data[i];
            }
        }

        const batch_size = Math.floor(bs_x_codebooks / num_codebooks);
        // Remaining tokens per (batch, codebook) row after filtering.
        const inferred = newDataSize / (batch_size * num_codebooks);
        // TODO: assert `inferred` is an integer
        return new Tensor(
            outputs.type,
            outputs.data.slice(0, newDataSize),
            [batch_size, num_codebooks, inferred]
        );
    }

    /**
     * Prepare decoder input ids for one generation step: re-apply the delay pattern mask
     * (pad the leading positions of each delayed codebook row) and, when classifier-free
     * guidance is enabled, duplicate the batch (the halves are split again before sampling).
     * @param {any} input_ids
     * @param {any} model_inputs
     * @param {any} generation_config
     */
    prepare_inputs_for_generation(input_ids, model_inputs, generation_config) {
        // apply the delay pattern mask
        let clonedInputIds = structuredClone(input_ids);
        for (let i = 0; i < clonedInputIds.length; ++i) {
            for (let j = 0; j < clonedInputIds[i].length; ++j) {
                // Row i belongs to codebook (i % num_codebooks); pad positions up to its delay.
                // @ts-expect-error TS2339
                if ((i % this.config.decoder.num_codebooks) >= j) {
                    // @ts-expect-error TS2339
                    clonedInputIds[i][j] = BigInt(this.config.decoder.pad_token_id);
                }
            }
        }

        // for classifier free guidance we need to replicate the decoder args across the batch dim
        // (we'll split these before sampling)
        if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) {
            // [batch, seqLength] -> [2 * batch, seqLength]
            clonedInputIds = clonedInputIds.concat(clonedInputIds);
        }

        const prepped = super.prepare_inputs_for_generation(clonedInputIds, model_inputs, generation_config);
        return prepped;
    }

    /**
     * Generates sequences of token ids for models with a language modeling head.
* @param {import('./generation/parameters.js').GenerationFunctionParameters} options * @returns {Promise<ModelOutput|Tensor>} The output of the model, which can contain the generated token ids, attentions, and scores. */ async generate(options) { const output_ids = await super.generate(options); // apply the pattern mask to the final ids // tensor: int64[1,batch_size,4,chunk_length] const audio_codes = this._apply_and_filter_by_delay_pattern_mask( /** @type {Tensor} */(output_ids) ).unsqueeze_(0); // append the frame dimension back to the audio codes const { audio_values } = await sessionRun(this.sessions['encodec_decode'], { audio_codes }) return audio_values; } } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileNetV1 models export class MobileNetV1PreTrainedModel extends PreTrainedModel { } /** * The bare MobileNetV1 model outputting raw hidden-states without any specific head on top. */ export class MobileNetV1Model extends MobileNetV1PreTrainedModel { } /** * MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), * e.g. for ImageNet. */ export class MobileNetV1ForImageClassification extends MobileNetV1PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } export class MobileNetV1ForSemanticSegmentation extends MobileNetV1PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileNetV2 models export class MobileNetV2PreTrainedModel extends PreTrainedModel { } /** * The bare MobileNetV2 model outputting raw hidden-states without any specific head on top. */ export class MobileNetV2Model extends MobileNetV2PreTrainedModel { } /** * MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), * e.g. for ImageNet. 
*/ export class MobileNetV2ForImageClassification extends MobileNetV2PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } export class MobileNetV2ForSemanticSegmentation extends MobileNetV2PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileNetV3 models export class MobileNetV3PreTrainedModel extends PreTrainedModel { } /** * The bare MobileNetV3 model outputting raw hidden-states without any specific head on top. */ export class MobileNetV3Model extends MobileNetV3PreTrainedModel { } /** * MobileNetV3 model with an image classification head on top (a linear layer on top of the pooled features), * e.g. for ImageNet. */ export class MobileNetV3ForImageClassification extends MobileNetV3PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } export class MobileNetV3ForSemanticSegmentation extends MobileNetV3PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // MobileNetV4 models export class MobileNetV4PreTrainedModel extends PreTrainedModel { } /** * The bare MobileNetV4 model outputting raw hidden-states without any specific head on top. */ export class MobileNetV4Model extends MobileNetV4PreTrainedModel { } /** * MobileNetV4 model with an image classification head on top (a linear layer on top of the pooled features), * e.g. for ImageNet. 
*/ export class MobileNetV4ForImageClassification extends MobileNetV4PreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new SequenceClassifierOutput(await super._call(model_inputs)); } } export class MobileNetV4ForSemanticSegmentation extends MobileNetV4PreTrainedModel { } ////////////////////////////////////////////////// ////////////////////////////////////////////////// // Decision Transformer models export class DecisionTransformerPreTrainedModel extends PreTrainedModel { } /** * The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL setting. * Refer to the paper for more details: https://huggingface.co/papers/2106.01345 */ export class DecisionTransformerModel extends DecisionTransformerPreTrainedModel { } ////////////////////////////////////////////////// export class MultiModalityPreTrainedModel extends PreTrainedModel { } export class MultiModalityCausalLM extends MultiModalityPreTrainedModel { forward_params = [ // prepare_inputs_embeds 'input_ids', 'pixel_values', 'images_seq_mask', 'images_emb_mask', // language_model 'attention_mask', 'position_ids', 'past_key_values', ]; /** * @param {ConstructorParameters<typeof MultiModalityPreTrainedModel>} args */ constructor(...args) { super(...args); // State-based approach to switch out which heads to use during generation this._generation_mode = 'text'; } async forward(model_inputs) { const mode = this._generation_mode ?? 
'text'; // TODO support re-using PKVs for input_ids.dims[1] !== 1 // if (model_inputs.past_key_values) { // // && model_inputs.input_ids.dims[1] === 1 // } let output_1; if (mode === 'text' || !model_inputs.past_key_values) { const session = this.sessions['prepare_inputs_embeds']; const prep_inputs = pick(model_inputs, session.inputNames); output_1 = await sessionRun(session, prep_inputs); } else { const session = this.sessions['gen_img_embeds']; const prep_inputs = pick({ image_ids: model_inputs.input_ids, }, session.inputNames); output_1 = await sessionRun(session, prep_inputs); } const input_2 = { ...model_inputs, ...output_1 } const output_2 = await decoderForward(this, input_2); const head = this.sessions[ mode === 'text' ? 'lm_head' : 'gen_head' ]; if (!head) { throw new Error(`Unable to find "${head}" generation head`); } const output_3 = await sessionRun(head, pick(output_2, head.inputNames)) return { ...output_1, ...output_2, ...output_3, }; } /** * @param {import('./generation/parameters.js').GenerationFunctionParameters} options */ async generate(options) { this._generation_mode = 'text'; return super.generate(options); } /** * @param {import('./generation/parameters.js').GenerationFunctionParameters} options */ async generate_images(options) { this._generation_mode = 'image'; const start_num_tokens = (options.inputs ?? 
options[this.main_input_name]).dims[1]; const all_tokens = await super.generate(options); const generated_tokens = (/** @type {Tensor} */(all_tokens)).slice(null, [start_num_tokens, null]) const image_decode = this.sessions['image_decode']; const { decoded_image } = await sessionRun(image_decode, { generated_tokens, }); // Equivalent to `np.clip((dec + 1) / 2 * 255, 0, 255)` const clamped = decoded_image .add_(1) .mul_(255 / 2) .clamp_(0, 255) .to('uint8'); // Return as a list of images const images = []; for (const tensor of clamped) { const img = RawImage.fromTensor(tensor); images.push(img); } return images; } } export class MgpstrModelOutput extends ModelOutput { constructor({ char_logits, bpe_logits, wp_logits }) { super(); this.char_logits = char_logits; this.bpe_logits = bpe_logits; this.wp_logits = wp_logits; } get logits() { return [this.char_logits, this.bpe_logits, this.wp_logits]; } } export class MgpstrPreTrainedModel extends PreTrainedModel { } /** * MGP-STR Model transformer with three classification heads on top * (three A^3 modules and three linear layer on top of the transformer encoder output) for scene text recognition (STR). */ export class MgpstrForSceneTextRecognition extends MgpstrPreTrainedModel { /** * @param {any} model_inputs */ async _call(model_inputs) { return new MgpstrModelOutput(await super._call(model_inputs)); } } ////////////////////////////////////////////////// // PatchTST Transformer models export class PatchTSTPreTrainedModel extends PreTrainedModel { } /** * The bare PatchTST Model outputting raw hidden-states without any specific head. */ export class PatchTSTModel extends PatchTSTPreTrainedModel { } /** * The PatchTST for prediction model. 
*/
export class PatchTSTForPrediction extends PatchTSTPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// PatchTSMixer Transformer models
export class PatchTSMixerPreTrainedModel extends PreTrainedModel { }

/**
 * The bare PatchTSMixer Model outputting raw hidden-states without any specific head.
 */
export class PatchTSMixerModel extends PatchTSMixerPreTrainedModel { }

/**
 * The PatchTSMixer for prediction model.
 */
export class PatchTSMixerForPrediction extends PatchTSMixerPreTrainedModel { }
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Ultravox (audio-text-to-text) models
export class UltravoxPreTrainedModel extends PreTrainedModel {
    forward_params = [
        'input_ids',
        'attention_mask',
        'position_ids',
        'audio_values',
        'past_key_values',
    ];
}

export class UltravoxModel extends UltravoxPreTrainedModel {
    /**
     * Splice encoded audio features into the text token embedding sequence.
     * Flattens `audio_features` to 2D (keeping only the last/hidden dimension)
     * before delegating to the shared merging helper.
     * @param {Object} kwargs Forward-pass inputs, including `audio_features` (a Tensor).
     */
    _merge_input_ids_with_audio_features(kwargs) {
        const audio_hidden_size = kwargs.audio_features.dims.at(-1);
        const reshaped_audio_features = kwargs.audio_features.view(-1, audio_hidden_size);

        return default_merge_input_ids_with_audio_features({
            // @ts-ignore
            // NOTE(review): falls back to `ignore_index` as the audio token id when present —
            // presumably for older Ultravox configs; confirm against upstream config schema.
            audio_token_id: this.config.ignore_index ?? this.config.audio_token_id,
            ...kwargs,
            audio_features: reshaped_audio_features,
        })
    }
}
//////////////////////////////////////////////////

// Voxtral reuses the Ultravox forward params and audio-feature merging logic unchanged.
export class VoxtralForConditionalGeneration extends UltravoxModel { }

//////////////////////////////////////////////////
// Mimi models
export class MimiPreTrainedModel extends PreTrainedModel {
    main_input_name = 'input_values';
    forward_params = ['input_values'];
}

export class MimiEncoderOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.audio_codes Discrete code embeddings, of shape `(batch_size, num_quantizers, codes_length)`.
     */
    constructor({ audio_codes }) {
        super();
        this.audio_codes = audio_codes;
    }
}

export class MimiDecoderOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.audio_values Decoded audio values, of shape `(batch_size, num_channels, sequence_length)`.
     */
    constructor({ audio_values }) {
        super();
        this.audio_values = audio_values;
    }
}

/**
 * The Mimi neural audio codec model.
 */
export class MimiModel extends MimiPreTrainedModel {
    /**
     * Encodes the input audio waveform into discrete codes.
     * @param {Object} inputs Model inputs
     * @param {Tensor} [inputs.input_values] Float values of the input audio waveform, of shape `(batch_size, channels, sequence_length)`).
     * @returns {Promise<MimiEncoderOutput>} The output tensor of shape `(batch_size, num_codebooks, sequence_length)`.
     */
    async encode(inputs) {
        return new MimiEncoderOutput(await sessionRun(this.sessions['encoder_model'], inputs));
    }

    /**
     * Decodes the given frames into an output audio waveform.
     * @param {MimiEncoderOutput} inputs The encoded audio codes.
     * @returns {Promise<MimiDecoderOutput>} The output tensor of shape `(batch_size, num_channels, sequence_length)`.
     */
    async decode(inputs) {
        return new MimiDecoderOutput(await sessionRun(this.sessions['decoder_model'], inputs));
    }
}

// Standalone encoder half of the Mimi codec (loads only the encoder ONNX file).
export class MimiEncoderModel extends MimiPreTrainedModel {
    /** @type {typeof PreTrainedModel.from_pretrained} */
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        return super.from_pretrained(pretrained_model_name_or_path, {
            ...options,
            // Update default model file name if not provided
            model_file_name: options.model_file_name ?? 'encoder_model',
        });
    }
}

// Standalone decoder half of the Mimi codec (loads only the decoder ONNX file).
export class MimiDecoderModel extends MimiPreTrainedModel {
    /** @type {typeof PreTrainedModel.from_pretrained} */
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        return super.from_pretrained(pretrained_model_name_or_path, {
            ...options,
            // Update default model file name if not provided
            model_file_name: options.model_file_name ?? 'decoder_model',
        });
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Dac models
export class DacPreTrainedModel extends PreTrainedModel {
    main_input_name = 'input_values';
    forward_params = ['input_values'];
}

export class DacEncoderOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.audio_codes Discrete code embeddings, of shape `(batch_size, num_quantizers, codes_length)`.
     */
    constructor({ audio_codes }) {
        super();
        this.audio_codes = audio_codes;
    }
}

export class DacDecoderOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.audio_values Decoded audio values, of shape `(batch_size, num_channels, sequence_length)`.
     */
    constructor({ audio_values }) {
        super();
        this.audio_values = audio_values;
    }
}

/**
 * The DAC (Descript Audio Codec) model.
 */
export class DacModel extends DacPreTrainedModel {
    /**
     * Encodes the input audio waveform into discrete codes.
     * @param {Object} inputs Model inputs
     * @param {Tensor} [inputs.input_values] Float values of the input audio waveform, of shape `(batch_size, channels, sequence_length)`).
     * @returns {Promise<DacEncoderOutput>} The output tensor of shape `(batch_size, num_codebooks, sequence_length)`.
     */
    async encode(inputs) {
        return new DacEncoderOutput(await sessionRun(this.sessions['encoder_model'], inputs));
    }

    /**
     * Decodes the given frames into an output audio waveform.
     * @param {DacEncoderOutput} inputs The encoded audio codes.
     * @returns {Promise<DacDecoderOutput>} The output tensor of shape `(batch_size, num_channels, sequence_length)`.
     */
    async decode(inputs) {
        return new DacDecoderOutput(await sessionRun(this.sessions['decoder_model'], inputs));
    }
}

// Standalone encoder half of DAC (loads only the encoder ONNX file).
export class DacEncoderModel extends DacPreTrainedModel {
    /** @type {typeof PreTrainedModel.from_pretrained} */
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        return super.from_pretrained(pretrained_model_name_or_path, {
            ...options,
            // Update default model file name if not provided
            model_file_name: options.model_file_name ?? 'encoder_model',
        });
    }
}

// Standalone decoder half of DAC (loads only the decoder ONNX file).
export class DacDecoderModel extends DacPreTrainedModel {
    /** @type {typeof PreTrainedModel.from_pretrained} */
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        return super.from_pretrained(pretrained_model_name_or_path, {
            ...options,
            // Update default model file name if not provided
            model_file_name: options.model_file_name ?? 'decoder_model',
        });
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// Snac models
export class SnacPreTrainedModel extends PreTrainedModel {
    main_input_name = 'input_values';
    forward_params = ['input_values'];
}

/**
 * The SNAC (Multi-Scale Neural Audio Codec) model.
 */
export class SnacModel extends SnacPreTrainedModel {
    /**
     * Encodes the input audio waveform into discrete codes.
     * @param {Object} inputs Model inputs
     * @param {Tensor} [inputs.input_values] Float values of the input audio waveform, of shape `(batch_size, channels, sequence_length)`).
     * @returns {Promise<Record<string, Tensor>>} The output tensors of shape `(batch_size, num_codebooks, sequence_length)`.
     */
    async encode(inputs) {
        return await sessionRun(this.sessions['encoder_model'], inputs);
    }

    /**
     * Decodes the given frames into an output audio waveform.
     * @param {Record<string, Tensor>} inputs The encoded audio codes.
     * @returns {Promise<{audio_values: Tensor}>} The output tensor of shape `(batch_size, num_channels, sequence_length)`.
     */
    async decode(inputs) {
        return await sessionRun(this.sessions['decoder_model'], inputs);
    }
}

// Standalone encoder half of SNAC (loads only the encoder ONNX file).
export class SnacEncoderModel extends SnacPreTrainedModel {
    /** @type {typeof PreTrainedModel.from_pretrained} */
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        return super.from_pretrained(pretrained_model_name_or_path, {
            ...options,
            // Update default model file name if not provided
            model_file_name: options.model_file_name ?? 'encoder_model',
        });
    }
}

// Standalone decoder half of SNAC (loads only the decoder ONNX file).
export class SnacDecoderModel extends SnacPreTrainedModel {
    /** @type {typeof PreTrainedModel.from_pretrained} */
    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
        return super.from_pretrained(pretrained_model_name_or_path, {
            ...options,
            // Update default model file name if not provided
            model_file_name: options.model_file_name ?? 'decoder_model',
        });
    }
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
// AutoModels, used to simplify construction of PreTrainedModels
// (uses config to instantiate correct class)

/**
 * Base class of all AutoModels. Contains the `from_pretrained` function
 * which is used to instantiate pretrained models.
 */
export class PretrainedMixin {
    /**
     * Mapping from model type to model class.
     * Each entry in the array is a Map of `model_type` -> `[class name string, class]`.
     * @type {Map<string, Object>[]}
     */
    static MODEL_CLASS_MAPPINGS = null;

    /**
     * Whether to attempt to instantiate the base class (`PretrainedModel`) if
     * the model type is not found in the mapping.
     */
    static BASE_IF_FAIL = false;

    /**
     * Resolution algorithm: load the config, read `config.model_type`, then scan
     * each mapping in `MODEL_CLass_MAPPINGS` order — first by map key, then (as a
     * fallback) by exact class-name match — and construct the first hit. If nothing
     * matches, either fall back to the base `PreTrainedModel` (when `BASE_IF_FAIL`)
     * or throw.
     * @type {typeof PreTrainedModel.from_pretrained}
     */
    static async from_pretrained(pretrained_model_name_or_path, {
        progress_callback = null,
        config = null,
        cache_dir = null,
        local_files_only = false,
        revision = 'main',
        model_file_name = null,
        subfolder = 'onnx',
        device = null,
        dtype = null,
        use_external_data_format = null,
        session_options = {},
    } = {}) {
        const options = {
            progress_callback,
            config,
            cache_dir,
            local_files_only,
            revision,
            model_file_name,
            subfolder,
            device,
            dtype,
            use_external_data_format,
            session_options,
        }
        options.config = await AutoConfig.from_pretrained(pretrained_model_name_or_path, options);

        if (!this.MODEL_CLASS_MAPPINGS) {
            throw new Error("`MODEL_CLASS_MAPPINGS` not implemented for this type of `AutoClass`: " + this.name);
        }

        const model_type = options.config.model_type;
        for (const MODEL_CLASS_MAPPING of this.MODEL_CLASS_MAPPINGS) {
            let modelInfo = MODEL_CLASS_MAPPING.get(model_type);
            if (!modelInfo) {
                // As a fallback, we check if model_type is specified as the exact class
                for (const cls of MODEL_CLASS_MAPPING.values()) {
                    if (cls[0] === model_type) {
                        modelInfo = cls;
                        break;
                    }
                }
                if (!modelInfo) continue; // Item not found in this mapping
            }
            // modelInfo is `[class name string, class]`; index 1 is the constructor.
            return await modelInfo[1].from_pretrained(pretrained_model_name_or_path, options);
        }

        if (this.BASE_IF_FAIL) {
            // Custom architectures are expected to hit this path, so don't warn for them.
            if (!(CUSTOM_ARCHITECTURES.has(model_type))) {
                console.warn(`Unknown model class "${model_type}", attempting to construct from base class.`)
            }
            return await PreTrainedModel.from_pretrained(pretrained_model_name_or_path, options);
        } else {
            throw Error(`Unsupported model type: ${model_type}`)
        }
    }
}

// Each mapping below: `model_type` string -> `[class name string, class]`.
const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([
    ['bert', ['BertModel', BertModel]],
    ['neobert', ['NeoBertModel', NeoBertModel]],
    ['modernbert', ['ModernBertModel', ModernBertModel]],
    ['nomic_bert', ['NomicBertModel', NomicBertModel]],
    ['roformer', ['RoFormerModel', RoFormerModel]],
    ['electra', ['ElectraModel', ElectraModel]],
    ['esm', ['EsmModel', EsmModel]],
    ['convbert', ['ConvBertModel', ConvBertModel]],
    ['camembert', ['CamembertModel', CamembertModel]],
    ['deberta', ['DebertaModel', DebertaModel]],
    ['deberta-v2', ['DebertaV2Model', DebertaV2Model]],
    ['mpnet', ['MPNetModel', MPNetModel]],
    ['albert', ['AlbertModel', AlbertModel]],
    ['distilbert', ['DistilBertModel', DistilBertModel]],
    ['roberta', ['RobertaModel', RobertaModel]],
    ['xlm', ['XLMModel', XLMModel]],
    ['xlm-roberta', ['XLMRobertaModel', XLMRobertaModel]],
    ['clap', ['ClapModel', ClapModel]],
    ['clip', ['CLIPModel', CLIPModel]],
    ['clipseg', ['CLIPSegModel', CLIPSegModel]],
    ['chinese_clip', ['ChineseCLIPModel', ChineseCLIPModel]],
    ['siglip', ['SiglipModel', SiglipModel]],
    ['jina_clip', ['JinaCLIPModel', JinaCLIPModel]],
    ['mobilebert', ['MobileBertModel', MobileBertModel]],
    ['squeezebert', ['SqueezeBertModel', SqueezeBertModel]],
    ['wav2vec2', ['Wav2Vec2Model', Wav2Vec2Model]],
    ['wav2vec2-bert', ['Wav2Vec2BertModel', Wav2Vec2BertModel]],
    ['unispeech', ['UniSpeechModel', UniSpeechModel]],
    ['unispeech-sat', ['UniSpeechSatModel', UniSpeechSatModel]],
    ['hubert', ['HubertModel', HubertModel]],
    ['wavlm', ['WavLMModel', WavLMModel]],
    ['audio-spectrogram-transformer', ['ASTModel', ASTModel]],
    ['vits', ['VitsModel', VitsModel]],
    ['pyannote', ['PyAnnoteModel', PyAnnoteModel]],
    ['wespeaker-resnet', ['WeSpeakerResNetModel', WeSpeakerResNetModel]],

    ['detr', ['DetrModel', DetrModel]],
    ['rt_detr', ['RTDetrModel', RTDetrModel]],
    ['rt_detr_v2', ['RTDetrV2Model', RTDetrV2Model]],
    ['rf_detr', ['RFDetrModel', RFDetrModel]],
    ['d_fine', ['DFineModel', DFineModel]],
    ['table-transformer', ['TableTransformerModel', TableTransformerModel]],
    ['vit', ['ViTModel', ViTModel]],
    ['ijepa', ['IJepaModel', IJepaModel]],
    ['pvt', ['PvtModel', PvtModel]],
    ['vit_msn', ['ViTMSNModel', ViTMSNModel]],
    ['vit_mae', ['ViTMAEModel', ViTMAEModel]],
    ['groupvit', ['GroupViTModel', GroupViTModel]],
    ['fastvit', ['FastViTModel', FastViTModel]],
    ['mobilevit', ['MobileViTModel', MobileViTModel]],
    ['mobilevitv2', ['MobileViTV2Model', MobileViTV2Model]],
    ['owlvit', ['OwlViTModel', OwlViTModel]],
    ['owlv2', ['Owlv2Model', Owlv2Model]],
    ['beit', ['BeitModel', BeitModel]],
    ['deit', ['DeiTModel', DeiTModel]],
    ['hiera', ['HieraModel', HieraModel]],
    ['convnext', ['ConvNextModel', ConvNextModel]],
    ['convnextv2', ['ConvNextV2Model', ConvNextV2Model]],
    ['dinov2', ['Dinov2Model', Dinov2Model]],
    ['dinov2_with_registers', ['Dinov2WithRegistersModel', Dinov2WithRegistersModel]],
    ['dinov3_vit', ['DINOv3ViTModel', DINOv3ViTModel]],
    ['dinov3_convnext', ['DINOv3ConvNextModel', DINOv3ConvNextModel]],
    ['resnet', ['ResNetModel', ResNetModel]],
    ['swin', ['SwinModel', SwinModel]],
    ['swin2sr', ['Swin2SRModel', Swin2SRModel]],
    ['donut-swin', ['DonutSwinModel', DonutSwinModel]],
    ['yolos', ['YolosModel', YolosModel]],
    ['dpt', ['DPTModel', DPTModel]],
    ['glpn', ['GLPNModel', GLPNModel]],

    ['hifigan', ['SpeechT5HifiGan', SpeechT5HifiGan]],
    ['efficientnet', ['EfficientNetModel', EfficientNetModel]],

    ['decision_transformer', ['DecisionTransformerModel', DecisionTransformerModel]],

    // NOTE(review): unlike every sibling entry, these pair a `*ForPrediction` name
    // string with the bare `*Model` class. The registration loop below maps names to
    // classes, so 'PatchTSTForPrediction' -> PatchTSTModel here is later overwritten
    // by MODEL_FOR_TIME_SERIES_PREDICTION_MAPPING_NAMES, and PatchTSTModel's reverse
    // name lookup becomes 'PatchTSTForPrediction'. Confirm whether
    // ['PatchTSTModel', PatchTSTModel] was intended.
    ['patchtst', ['PatchTSTForPrediction', PatchTSTModel]],
    ['patchtsmixer', ['PatchTSMixerForPrediction', PatchTSMixerModel]],

    ['mobilenet_v1', ['MobileNetV1Model', MobileNetV1Model]],
    ['mobilenet_v2', ['MobileNetV2Model', MobileNetV2Model]],
    ['mobilenet_v3', ['MobileNetV3Model', MobileNetV3Model]],
    ['mobilenet_v4', ['MobileNetV4Model', MobileNetV4Model]],

    ['maskformer', ['MaskFormerModel', MaskFormerModel]],
    ['mgp-str', ['MgpstrForSceneTextRecognition', MgpstrForSceneTextRecognition]],

    ['style_text_to_speech_2', ['StyleTextToSpeech2Model', StyleTextToSpeech2Model]],
]);

const MODEL_MAPPING_NAMES_ENCODER_DECODER = new Map([
    ['t5', ['T5Model', T5Model]],
    ['longt5', ['LongT5Model', LongT5Model]],
    ['mt5', ['MT5Model', MT5Model]],
    ['bart', ['BartModel', BartModel]],
    ['mbart', ['MBartModel', MBartModel]],
    ['marian', ['MarianModel', MarianModel]],
    ['whisper', ['WhisperModel', WhisperModel]],
    ['m2m_100', ['M2M100Model', M2M100Model]],
    ['blenderbot', ['BlenderbotModel', BlenderbotModel]],
    ['blenderbot-small', ['BlenderbotSmallModel', BlenderbotSmallModel]],
]);

const MODEL_MAPPING_NAMES_AUTO_ENCODER = new Map([
    ['mimi', ['MimiModel', MimiModel]],
    ['dac', ['DacModel', DacModel]],
    ['snac', ['SnacModel', SnacModel]],
]);

const MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([
    ['bloom', ['BloomModel', BloomModel]],
    ['jais', ['JAISModel', JAISModel]],
    ['gpt2', ['GPT2Model', GPT2Model]],
    ['gptj', ['GPTJModel', GPTJModel]],
    ['gpt_bigcode', ['GPTBigCodeModel', GPTBigCodeModel]],
    ['gpt_neo', ['GPTNeoModel', GPTNeoModel]],
    ['gpt_neox', ['GPTNeoXModel', GPTNeoXModel]],
    ['codegen', ['CodeGenModel', CodeGenModel]],
    ['llama', ['LlamaModel', LlamaModel]],
    ['arcee', ['ArceeModel', ArceeModel]],
    ['lfm2', ['Lfm2Model', Lfm2Model]],
    ['smollm3', ['SmolLM3Model', SmolLM3Model]],
    ['exaone', ['ExaoneModel', ExaoneModel]],
    ['olmo', ['OlmoModel', OlmoModel]],
    ['olmo2', ['Olmo2Model', Olmo2Model]],
    ['mobilellm', ['MobileLLMModel', MobileLLMModel]],
    ['granite', ['GraniteModel', GraniteModel]],
    ['cohere', ['CohereModel', CohereModel]],
    ['gemma', ['GemmaModel', GemmaModel]],
    ['gemma2', ['Gemma2Model', Gemma2Model]],
    ['gemma3_text', ['Gemma3Model', Gemma3Model]],
    ['helium', ['HeliumModel', HeliumModel]],
    ['glm', ['GlmModel', GlmModel]],
    ['openelm', ['OpenELMModel', OpenELMModel]],
    ['qwen2', ['Qwen2Model', Qwen2Model]],
    ['qwen3', ['Qwen3Model', Qwen3Model]],
    ['phi', ['PhiModel', PhiModel]],
    ['phi3', ['Phi3Model', Phi3Model]],
    ['mpt', ['MptModel', MptModel]],
    ['opt', ['OPTModel', OPTModel]],
    ['mistral', ['MistralModel', MistralModel]],
    ['ernie4_5', ['Ernie4_5_Model', Ernie4_5_Model]],
    ['starcoder2', ['Starcoder2Model', Starcoder2Model]],
    ['falcon', ['FalconModel', FalconModel]],
    ['stablelm', ['StableLmModel', StableLmModel]],
    ['modernbert-decoder', ['ModernBertDecoderModel', ModernBertDecoderModel]],
]);

const MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = new Map([
    ['speecht5', ['SpeechT5ForSpeechToText', SpeechT5ForSpeechToText]],
    ['whisper', ['WhisperForConditionalGeneration', WhisperForConditionalGeneration]],
    ['lite-whisper', ['LiteWhisperForConditionalGeneration', LiteWhisperForConditionalGeneration]],
    ['moonshine', ['MoonshineForConditionalGeneration', MoonshineForConditionalGeneration]],
]);

const MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = new Map([
    ['speecht5', ['SpeechT5ForTextToSpeech', SpeechT5ForTextToSpeech]],
]);

const MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = new Map([
    ['vits', ['VitsModel', VitsModel]],
    ['musicgen', ['MusicgenForConditionalGeneration', MusicgenForConditionalGeneration]],
]);

const MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = new Map([
    ['bert', ['BertForSequenceClassification', BertForSequenceClassification]],
    ['neobert', ['NeoBertForSequenceClassification', NeoBertForSequenceClassification]],
    ['modernbert', ['ModernBertForSequenceClassification', ModernBertForSequenceClassification]],
    ['roformer', ['RoFormerForSequenceClassification', RoFormerForSequenceClassification]],
    ['electra', ['ElectraForSequenceClassification', ElectraForSequenceClassification]],
    ['esm', ['EsmForSequenceClassification', EsmForSequenceClassification]],
    ['convbert', ['ConvBertForSequenceClassification', ConvBertForSequenceClassification]],
    ['camembert', ['CamembertForSequenceClassification', CamembertForSequenceClassification]],
    ['deberta', ['DebertaForSequenceClassification', DebertaForSequenceClassification]],
    ['deberta-v2', ['DebertaV2ForSequenceClassification', DebertaV2ForSequenceClassification]],
    ['mpnet', ['MPNetForSequenceClassification', MPNetForSequenceClassification]],
    ['albert', ['AlbertForSequenceClassification', AlbertForSequenceClassification]],
    ['distilbert', ['DistilBertForSequenceClassification', DistilBertForSequenceClassification]],
    ['roberta', ['RobertaForSequenceClassification', RobertaForSequenceClassification]],
    ['xlm', ['XLMForSequenceClassification', XLMForSequenceClassification]],
    ['xlm-roberta', ['XLMRobertaForSequenceClassification', XLMRobertaForSequenceClassification]],
    ['bart', ['BartForSequenceClassification', BartForSequenceClassification]],
    ['mbart', ['MBartForSequenceClassification', MBartForSequenceClassification]],
    ['mobilebert', ['MobileBertForSequenceClassification', MobileBertForSequenceClassification]],
    ['squeezebert', ['SqueezeBertForSequenceClassification', SqueezeBertForSequenceClassification]],
]);

const MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = new Map([
    ['bert', ['BertForTokenClassification', BertForTokenClassification]],
    ['neobert', ['NeoBertForTokenClassification', NeoBertForTokenClassification]],
    ['modernbert', ['ModernBertForTokenClassification', ModernBertForTokenClassification]],
    ['roformer', ['RoFormerForTokenClassification', RoFormerForTokenClassification]],
    ['electra', ['ElectraForTokenClassification', ElectraForTokenClassification]],
    ['esm', ['EsmForTokenClassification', EsmForTokenClassification]],
    ['convbert', ['ConvBertForTokenClassification', ConvBertForTokenClassification]],
    ['camembert', ['CamembertForTokenClassification', CamembertForTokenClassification]],
    ['deberta', ['DebertaForTokenClassification', DebertaForTokenClassification]],
    ['deberta-v2', ['DebertaV2ForTokenClassification', DebertaV2ForTokenClassification]],
    ['mpnet', ['MPNetForTokenClassification', MPNetForTokenClassification]],
    ['distilbert', ['DistilBertForTokenClassification', DistilBertForTokenClassification]],
    ['roberta', ['RobertaForTokenClassification', RobertaForTokenClassification]],
    ['xlm', ['XLMForTokenClassification', XLMForTokenClassification]],
    ['xlm-roberta', ['XLMRobertaForTokenClassification', XLMRobertaForTokenClassification]],
]);

const MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = new Map([
    ['t5', ['T5ForConditionalGeneration', T5ForConditionalGeneration]],
    ['longt5', ['LongT5ForConditionalGeneration', LongT5ForConditionalGeneration]],
    ['mt5', ['MT5ForConditionalGeneration', MT5ForConditionalGeneration]],
    ['bart', ['BartForConditionalGeneration', BartForConditionalGeneration]],
    ['mbart', ['MBartForConditionalGeneration', MBartForConditionalGeneration]],
    ['marian', ['MarianMTModel', MarianMTModel]],
    ['m2m_100', ['M2M100ForConditionalGeneration', M2M100ForConditionalGeneration]],
    ['blenderbot', ['BlenderbotForConditionalGeneration', BlenderbotForConditionalGeneration]],
    ['blenderbot-small', ['BlenderbotSmallForConditionalGeneration', BlenderbotSmallForConditionalGeneration]],
]);

const MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = new Map([
    ['bloom', ['BloomForCausalLM', BloomForCausalLM]],
    ['gpt2', ['GPT2LMHeadModel', GPT2LMHeadModel]],
    ['jais', ['JAISLMHeadModel', JAISLMHeadModel]],
    ['gptj', ['GPTJForCausalLM', GPTJForCausalLM]],
    ['gpt_bigcode', ['GPTBigCodeForCausalLM', GPTBigCodeForCausalLM]],
    ['gpt_neo', ['GPTNeoForCausalLM', GPTNeoForCausalLM]],
    ['gpt_neox', ['GPTNeoXForCausalLM', GPTNeoXForCausalLM]],
    ['codegen', ['CodeGenForCausalLM', CodeGenForCausalLM]],
    ['llama', ['LlamaForCausalLM', LlamaForCausalLM]],
    ['arcee', ['ArceeForCausalLM', ArceeForCausalLM]],
    ['lfm2', ['Lfm2ForCausalLM', Lfm2ForCausalLM]],
    ['smollm3', ['SmolLM3ForCausalLM', SmolLM3ForCausalLM]],
    ['exaone', ['ExaoneForCausalLM', ExaoneForCausalLM]],
    ['olmo', ['OlmoForCausalLM', OlmoForCausalLM]],
    ['olmo2', ['Olmo2ForCausalLM', Olmo2ForCausalLM]],
    ['mobilellm', ['MobileLLMForCausalLM', MobileLLMForCausalLM]],
    ['granite', ['GraniteForCausalLM', GraniteForCausalLM]],
    ['cohere', ['CohereForCausalLM', CohereForCausalLM]],
    ['gemma', ['GemmaForCausalLM', GemmaForCausalLM]],
    ['gemma2', ['Gemma2ForCausalLM', Gemma2ForCausalLM]],
    ['gemma3_text', ['Gemma3ForCausalLM', Gemma3ForCausalLM]],
    ['helium', ['HeliumForCausalLM', HeliumForCausalLM]],
    ['glm', ['GlmForCausalLM', GlmForCausalLM]],
    ['openelm', ['OpenELMForCausalLM', OpenELMForCausalLM]],
    ['qwen2', ['Qwen2ForCausalLM', Qwen2ForCausalLM]],
    ['qwen3', ['Qwen3ForCausalLM', Qwen3ForCausalLM]],
    ['phi', ['PhiForCausalLM', PhiForCausalLM]],
    ['phi3', ['Phi3ForCausalLM', Phi3ForCausalLM]],
    ['mpt', ['MptForCausalLM', MptForCausalLM]],
    ['opt', ['OPTForCausalLM', OPTForCausalLM]],
    ['mbart', ['MBartForCausalLM', MBartForCausalLM]],
    ['mistral', ['MistralForCausalLM', MistralForCausalLM]],
    ['ernie4_5', ['Ernie4_5_ForCausalLM', Ernie4_5_ForCausalLM]],
    ['starcoder2', ['Starcoder2ForCausalLM', Starcoder2ForCausalLM]],
    ['falcon', ['FalconForCausalLM', FalconForCausalLM]],
    ['trocr', ['TrOCRForCausalLM', TrOCRForCausalLM]],
    ['stablelm', ['StableLmForCausalLM', StableLmForCausalLM]],
    ['modernbert-decoder', ['ModernBertDecoderForCausalLM', ModernBertDecoderForCausalLM]],

    // Also image-text-to-text
    ['phi3_v', ['Phi3VForCausalLM', Phi3VForCausalLM]],
]);

const MODEL_FOR_MULTIMODALITY_MAPPING_NAMES = new Map([
    ['multi_modality', ['MultiModalityCausalLM', MultiModalityCausalLM]],
]);

const MODEL_FOR_MASKED_LM_MAPPING_NAMES = new Map([
    ['bert', ['BertForMaskedLM', BertForMaskedLM]],
    ['neobert', ['NeoBertForMaskedLM', NeoBertForMaskedLM]],
    ['modernbert', ['ModernBertForMaskedLM', ModernBertForMaskedLM]],
    ['roformer', ['RoFormerForMaskedLM', RoFormerForMaskedLM]],
    ['electra', ['ElectraForMaskedLM', ElectraForMaskedLM]],
    ['esm', ['EsmForMaskedLM', EsmForMaskedLM]],
    ['convbert', ['ConvBertForMaskedLM', ConvBertForMaskedLM]],
    ['camembert', ['CamembertForMaskedLM', CamembertForMaskedLM]],
    ['deberta', ['DebertaForMaskedLM', DebertaForMaskedLM]],
    ['deberta-v2', ['DebertaV2ForMaskedLM', DebertaV2ForMaskedLM]],
    ['mpnet', ['MPNetForMaskedLM', MPNetForMaskedLM]],
    ['albert', ['AlbertForMaskedLM', AlbertForMaskedLM]],
    ['distilbert', ['DistilBertForMaskedLM', DistilBertForMaskedLM]],
    ['roberta', ['RobertaForMaskedLM', RobertaForMaskedLM]],
    ['xlm', ['XLMWithLMHeadModel', XLMWithLMHeadModel]],
    ['xlm-roberta', ['XLMRobertaForMaskedLM', XLMRobertaForMaskedLM]],
    ['mobilebert', ['MobileBertForMaskedLM', MobileBertForMaskedLM]],
    ['squeezebert', ['SqueezeBertForMaskedLM', SqueezeBertForMaskedLM]],
]);

const MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = new Map([
    ['bert', ['BertForQuestionAnswering', BertForQuestionAnswering]],
    ['neobert', ['NeoBertForQuestionAnswering', NeoBertForQuestionAnswering]],
    ['roformer', ['RoFormerForQuestionAnswering', RoFormerForQuestionAnswering]],
    ['electra', ['ElectraForQuestionAnswering', ElectraForQuestionAnswering]],
    ['convbert', ['ConvBertForQuestionAnswering', ConvBertForQuestionAnswering]],
    ['camembert', ['CamembertForQuestionAnswering', CamembertForQuestionAnswering]],
    ['deberta', ['DebertaForQuestionAnswering', DebertaForQuestionAnswering]],
    ['deberta-v2', ['DebertaV2ForQuestionAnswering', DebertaV2ForQuestionAnswering]],
    ['mpnet', ['MPNetForQuestionAnswering', MPNetForQuestionAnswering]],
    ['albert', ['AlbertForQuestionAnswering', AlbertForQuestionAnswering]],
    ['distilbert', ['DistilBertForQuestionAnswering', DistilBertForQuestionAnswering]],
    ['roberta', ['RobertaForQuestionAnswering', RobertaForQuestionAnswering]],
    ['xlm', ['XLMForQuestionAnswering', XLMForQuestionAnswering]],
    ['xlm-roberta', ['XLMRobertaForQuestionAnswering', XLMRobertaForQuestionAnswering]],
    ['mobilebert', ['MobileBertForQuestionAnswering', MobileBertForQuestionAnswering]],
    ['squeezebert', ['SqueezeBertForQuestionAnswering', SqueezeBertForQuestionAnswering]],
]);

const MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = new Map([
    ['vision-encoder-decoder', ['VisionEncoderDecoderModel', VisionEncoderDecoderModel]],
    ['idefics3', ['Idefics3ForConditionalGeneration', Idefics3ForConditionalGeneration]],
    ['smolvlm', ['SmolVLMForConditionalGeneration', SmolVLMForConditionalGeneration]],
]);

const MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES = new Map([
    ['llava', ['LlavaForConditionalGeneration', LlavaForConditionalGeneration]],
    ['llava_onevision', ['LlavaOnevisionForConditionalGeneration', LlavaOnevisionForConditionalGeneration]],
    ['moondream1', ['Moondream1ForConditionalGeneration', Moondream1ForConditionalGeneration]],
    ['florence2', ['Florence2ForConditionalGeneration', Florence2ForConditionalGeneration]],
    ['qwen2-vl', ['Qwen2VLForConditionalGeneration', Qwen2VLForConditionalGeneration]],
    ['idefics3', ['Idefics3ForConditionalGeneration', Idefics3ForConditionalGeneration]],
    ['smolvlm', ['SmolVLMForConditionalGeneration', SmolVLMForConditionalGeneration]],
    ['paligemma', ['PaliGemmaForConditionalGeneration', PaliGemmaForConditionalGeneration]],
    ['llava_qwen2', ['LlavaQwen2ForCausalLM', LlavaQwen2ForCausalLM]],
    ['gemma3n', ['Gemma3nForConditionalGeneration', Gemma3nForConditionalGeneration]],
]);

const MODEL_FOR_AUDIO_TEXT_TO_TEXT_MAPPING_NAMES = new Map([
    ['ultravox', ['UltravoxModel', UltravoxModel]],
    ['voxtral', ['VoxtralForConditionalGeneration', VoxtralForConditionalGeneration]],
]);

const MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = new Map([
    ['vision-encoder-decoder', ['VisionEncoderDecoderModel', VisionEncoderDecoderModel]],
]);

const MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = new Map([
    ['vit', ['ViTForImageClassification', ViTForImageClassification]],
    ['ijepa', ['IJepaForImageClassification', IJepaForImageClassification]],
    ['pvt', ['PvtForImageClassification', PvtForImageClassification]],
    ['vit_msn', ['ViTMSNForImageClassification', ViTMSNForImageClassification]],
    ['fastvit', ['FastViTForImageClassification', FastViTForImageClassification]],
    ['mobilevit', ['MobileViTForImageClassification', MobileViTForImageClassification]],
    ['mobilevitv2', ['MobileViTV2ForImageClassification', MobileViTV2ForImageClassification]],
    ['beit', ['BeitForImageClassification', BeitForImageClassification]],
    ['deit', ['DeiTForImageClassification', DeiTForImageClassification]],
    ['hiera', ['HieraForImageClassification', HieraForImageClassification]],
    ['convnext', ['ConvNextForImageClassification', ConvNextForImageClassification]],
    ['convnextv2', ['ConvNextV2ForImageClassification', ConvNextV2ForImageClassification]],
    ['dinov2', ['Dinov2ForImageClassification', Dinov2ForImageClassification]],
    ['dinov2_with_registers', ['Dinov2WithRegistersForImageClassification', Dinov2WithRegistersForImageClassification]],
    ['resnet', ['ResNetForImageClassification', ResNetForImageClassification]],
    ['swin', ['SwinForImageClassification', SwinForImageClassification]],
    ['segformer', ['SegformerForImageClassification', SegformerForImageClassification]],
    ['efficientnet', ['EfficientNetForImageClassification', EfficientNetForImageClassification]],
    ['mobilenet_v1', ['MobileNetV1ForImageClassification', MobileNetV1ForImageClassification]],
    ['mobilenet_v2', ['MobileNetV2ForImageClassification', MobileNetV2ForImageClassification]],
    ['mobilenet_v3', ['MobileNetV3ForImageClassification', MobileNetV3ForImageClassification]],
    ['mobilenet_v4', ['MobileNetV4ForImageClassification', MobileNetV4ForImageClassification]],
]);

const MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = new Map([
    ['detr', ['DetrForObjectDetection', DetrForObjectDetection]],
    ['rt_detr', ['RTDetrForObjectDetection', RTDetrForObjectDetection]],
    ['rt_detr_v2', ['RTDetrV2ForObjectDetection', RTDetrV2ForObjectDetection]],
    ['rf_detr', ['RFDetrForObjectDetection', RFDetrForObjectDetection]],
    ['d_fine', ['DFineForObjectDetection', DFineForObjectDetection]],
    ['table-transformer', ['TableTransformerForObjectDetection', TableTransformerForObjectDetection]],
    ['yolos', ['YolosForObjectDetection', YolosForObjectDetection]],
]);

const MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = new Map([
    ['owlvit', ['OwlViTForObjectDetection', OwlViTForObjectDetection]],
    ['owlv2', ['Owlv2ForObjectDetection', Owlv2ForObjectDetection]],
    ['grounding-dino', ['GroundingDinoForObjectDetection', GroundingDinoForObjectDetection]],
]);

const MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = new Map([
    // TODO: Do not add new models here
    ['detr', ['DetrForSegmentation', DetrForSegmentation]],
    ['clipseg', ['CLIPSegForImageSegmentation', CLIPSegForImageSegmentation]],
]);

const MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = new Map([
    ['segformer', ['SegformerForSemanticSegmentation', SegformerForSemanticSegmentation]],
    ['sapiens', ['SapiensForSemanticSegmentation', SapiensForSemanticSegmentation]],
    ['swin', ['SwinForSemanticSegmentation', SwinForSemanticSegmentation]],
    ['mobilenet_v1', ['MobileNetV1ForSemanticSegmentation', MobileNetV1ForSemanticSegmentation]],
    ['mobilenet_v2', ['MobileNetV2ForSemanticSegmentation', MobileNetV2ForSemanticSegmentation]],
    ['mobilenet_v3', ['MobileNetV3ForSemanticSegmentation', MobileNetV3ForSemanticSegmentation]],
    ['mobilenet_v4', ['MobileNetV4ForSemanticSegmentation', MobileNetV4ForSemanticSegmentation]],
]);

const MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = new Map([
    ['detr', ['DetrForSegmentation', DetrForSegmentation]],
    ['maskformer', ['MaskFormerForInstanceSegmentation', MaskFormerForInstanceSegmentation]],
]);

const MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = new Map([
    ['sam', ['SamModel', SamModel]],
]);

const MODEL_FOR_CTC_MAPPING_NAMES = new Map([
    ['wav2vec2', ['Wav2Vec2ForCTC', Wav2Vec2ForCTC]],
    ['wav2vec2-bert', ['Wav2Vec2BertForCTC', Wav2Vec2BertForCTC]],
    ['unispeech', ['UniSpeechForCTC', UniSpeechForCTC]],
    ['unispeech-sat', ['UniSpeechSatForCTC', UniSpeechSatForCTC]],
    ['wavlm', ['WavLMForCTC', WavLMForCTC]],
    ['hubert', ['HubertForCTC', HubertForCTC]],
]);

const MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = new Map([
    ['wav2vec2', ['Wav2Vec2ForSequenceClassification', Wav2Vec2ForSequenceClassification]],
    ['wav2vec2-bert', ['Wav2Vec2BertForSequenceClassification', Wav2Vec2BertForSequenceClassification]],
    ['unispeech', ['UniSpeechForSequenceClassification', UniSpeechForSequenceClassification]],
    ['unispeech-sat', ['UniSpeechSatForSequenceClassification', UniSpeechSatForSequenceClassification]],
    ['wavlm', ['WavLMForSequenceClassification', WavLMForSequenceClassification]],
    ['hubert', ['HubertForSequenceClassification', HubertForSequenceClassification]],
    ['audio-spectrogram-transformer', ['ASTForAudioClassification', ASTForAudioClassification]],
]);

const MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = new Map([
    ['wavlm', ['WavLMForXVector', WavLMForXVector]],
]);

const MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = new Map([
    ['unispeech-sat', ['UniSpeechSatForAudioFrameClassification', UniSpeechSatForAudioFrameClassification]],
    ['wavlm', ['WavLMForAudioFrameClassification', WavLMForAudioFrameClassification]],
    ['wav2vec2', ['Wav2Vec2ForAudioFrameClassification', Wav2Vec2ForAudioFrameClassification]],
    ['pyannote', ['PyAnnoteForAudioFrameClassification', PyAnnoteForAudioFrameClassification]],
]);

const MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES = new Map([
    ['vitmatte', ['VitMatteForImageMatting', VitMatteForImageMatting]],
]);

const MODEL_FOR_TIME_SERIES_PREDICTION_MAPPING_NAMES = new Map([
    ['patchtst', ['PatchTSTForPrediction', PatchTSTForPrediction]],
    ['patchtsmixer', ['PatchTSMixerForPrediction', PatchTSMixerForPrediction]],
])

const MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = new Map([
    ['swin2sr', ['Swin2SRForImageSuperResolution', Swin2SRForImageSuperResolution]],
])

const MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = new Map([
    ['dpt', ['DPTForDepthEstimation', DPTForDepthEstimation]],
    ['depth_anything', ['DepthAnythingForDepthEstimation', DepthAnythingForDepthEstimation]],
    ['glpn', ['GLPNForDepthEstimation', GLPNForDepthEstimation]],
    ['sapiens', ['SapiensForDepthEstimation', SapiensForDepthEstimation]],
    ['depth_pro', ['DepthProForDepthEstimation', DepthProForDepthEstimation]],
    ['metric3d', ['Metric3DForDepthEstimation', Metric3DForDepthEstimation]],
    ['metric3dv2', ['Metric3Dv2ForDepthEstimation', Metric3Dv2ForDepthEstimation]],
])

const MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES = new Map([
    ['sapiens', ['SapiensForNormalEstimation', SapiensForNormalEstimation]],
])

const MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES = new Map([
    ['vitpose', ['VitPoseForPoseEstimation', VitPoseForPoseEstimation]],
])

// NOTE: This is custom to Transformers.js, and is necessary because certain models
// (e.g., CLIP) are split into vision and text components
const MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES = new Map([
    ['clip', ['CLIPVisionModelWithProjection', CLIPVisionModelWithProjection]],
    ['siglip', ['SiglipVisionModel', SiglipVisionModel]],
    ['jina_clip', ['JinaCLIPVisionModel', JinaCLIPVisionModel]],
])

// Pairs each registry above with its MODEL_TYPES category (how the runtime
// loads/sessions the model). Order matters: this is also the lookup order used
// by AutoModel, and later entries overwrite earlier ones in the name/class
// registration loop below.
const MODEL_CLASS_TYPE_MAPPING = [
    // MODEL_MAPPING_NAMES:
    [MODEL_MAPPING_NAMES_ENCODER_ONLY, MODEL_TYPES.EncoderOnly],
    [MODEL_MAPPING_NAMES_ENCODER_DECODER, MODEL_TYPES.EncoderDecoder],
    [MODEL_MAPPING_NAMES_DECODER_ONLY, MODEL_TYPES.DecoderOnly],
    [MODEL_MAPPING_NAMES_AUTO_ENCODER, MODEL_TYPES.AutoEncoder],
    [MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq],
    [MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Seq2Seq],
    [MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_TYPES.DecoderOnly],
    [MODEL_FOR_MULTIMODALITY_MAPPING_NAMES, MODEL_TYPES.MultiModality],
    [MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Vision2Seq],
    [MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES, MODEL_TYPES.ImageTextToText],
    [MODEL_FOR_AUDIO_TEXT_TO_TEXT_MAPPING_NAMES, MODEL_TYPES.AudioTextToText],
    [MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_TIME_SERIES_PREDICTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_MASK_GENERATION_MAPPING_NAMES, MODEL_TYPES.MaskGeneration],
    [MODEL_FOR_CTC_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq],
    [MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
    [MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],

    // Custom:
    [MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
];

// Register every (name, class) pair into the three global lookup tables.
// For duplicate names across mappings, the LAST mapping in the array wins.
for (const [mappings, type] of MODEL_CLASS_TYPE_MAPPING) {
    // @ts-ignore
    for (const [name, model] of mappings.values()) {
        MODEL_TYPE_MAPPING.set(name, type);
        MODEL_CLASS_TO_NAME_MAPPING.set(model, name);
        MODEL_NAME_TO_CLASS_MAPPING.set(name, model);
    }
}

// Classes whose MODEL_TYPES category cannot be derived from the mappings above
// (e.g., text/audio sub-models of split architectures, codec encoder/decoder halves).
const CUSTOM_MAPPING = [
    // OVERRIDE:
    // TODO: Refactor to allow class to specify model
    ['MusicgenForConditionalGeneration', MusicgenForConditionalGeneration, MODEL_TYPES.Musicgen],
    ['Phi3VForCausalLM', Phi3VForCausalLM, MODEL_TYPES.Phi3V],

    ['CLIPTextModelWithProjection', CLIPTextModelWithProjection, MODEL_TYPES.EncoderOnly],
    ['SiglipTextModel', SiglipTextModel, MODEL_TYPES.EncoderOnly],
    ['JinaCLIPTextModel', JinaCLIPTextModel, MODEL_TYPES.EncoderOnly],
    ['ClapTextModelWithProjection', ClapTextModelWithProjection, MODEL_TYPES.EncoderOnly],
    ['ClapAudioModelWithProjection', ClapAudioModelWithProjection, MODEL_TYPES.EncoderOnly],

    ['DacEncoderModel', DacEncoderModel, MODEL_TYPES.EncoderOnly],
    ['DacDecoderModel', DacDecoderModel, MODEL_TYPES.EncoderOnly],
    ['MimiEncoderModel', MimiEncoderModel, MODEL_TYPES.EncoderOnly],
    ['MimiDecoderModel', MimiDecoderModel, MODEL_TYPES.EncoderOnly],
    ['SnacEncoderModel', SnacEncoderModel, MODEL_TYPES.EncoderOnly],
    ['SnacDecoderModel', SnacDecoderModel, MODEL_TYPES.EncoderOnly],
    ['Gemma3nForConditionalGeneration', Gemma3nForConditionalGeneration, MODEL_TYPES.ImageAudioTextToText],
]
for (const [name, model, type] of CUSTOM_MAPPING) {
    MODEL_TYPE_MAPPING.set(name, type);
    MODEL_CLASS_TO_NAME_MAPPING.set(model, name);
    MODEL_NAME_TO_CLASS_MAPPING.set(name, model);
}

// Community architectures with no dedicated class: each is registered as the bare
// `PreTrainedModel` under the listed task mapping, so AutoModel can still load them
// (and PretrainedMixin skips the "unknown model class" warning for these types).
const CUSTOM_ARCHITECTURES = new Map([
    ['modnet', MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES],
    ['birefnet', MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES],
    ['isnet', MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES],
    ['ben', MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES],
]);
for (const [name, mapping] of CUSTOM_ARCHITECTURES.entries()) {
    mapping.set(name, ['PreTrainedModel', PreTrainedModel])
    MODEL_TYPE_MAPPING.set(name, MODEL_TYPES.EncoderOnly);
    // NOTE(review): PreTrainedModel is re-registered for each name, so
    // MODEL_CLASS_TO_NAME_MAPPING ends up mapping it to the LAST entry ('ben') —
    // presumably acceptable since these share one class; confirm.
    MODEL_CLASS_TO_NAME_MAPPING.set(PreTrainedModel, name);
    MODEL_NAME_TO_CLASS_MAPPING.set(name, PreTrainedModel);
}

/**
 * Helper class which is used to instantiate pretrained models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased');
 */
export class AutoModel extends PretrainedMixin {
    /** @type {Map<string, Object>[]} */
    // @ts-ignore
    static MODEL_CLASS_MAPPINGS = MODEL_CLASS_TYPE_MAPPING.map(x => x[0]);
    static BASE_IF_FAIL = true;
}

/**
 * Helper class which is used to instantiate pretrained sequence classification models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForSequenceClassification.from_pretrained('Xenova/distilbert-base-uncased-finetuned-sst-2-english');
 */
export class AutoModelForSequenceClassification extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained token classification models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForTokenClassification.from_pretrained('Xenova/distilbert-base-multilingual-cased-ner-hrl');
 */
export class AutoModelForTokenClassification extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained sequence-to-sequence models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForSeq2SeqLM.from_pretrained('Xenova/t5-small');
 */
export class AutoModelForSeq2SeqLM extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained sequence-to-sequence speech-to-text models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForSpeechSeq2Seq.from_pretrained('openai/whisper-tiny.en');
 */
export class AutoModelForSpeechSeq2Seq extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained sequence-to-sequence text-to-spectrogram models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForTextToSpectrogram.from_pretrained('microsoft/speecht5_tts');
 */
export class AutoModelForTextToSpectrogram extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained text-to-waveform models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForTextToWaveform.from_pretrained('facebook/mms-tts-eng');
 */
export class AutoModelForTextToWaveform extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained causal language models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForCausalLM.from_pretrained('Xenova/gpt2');
 */
export class AutoModelForCausalLM extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_CAUSAL_LM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained masked language models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForMaskedLM.from_pretrained('Xenova/bert-base-uncased');
 */
export class AutoModelForMaskedLM extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_MASKED_LM_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained question answering models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForQuestionAnswering.from_pretrained('Xenova/distilbert-base-cased-distilled-squad');
 */
export class AutoModelForQuestionAnswering extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained vision-to-sequence models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForVision2Seq.from_pretrained('Xenova/vit-gpt2-image-captioning');
 */
export class AutoModelForVision2Seq extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained image classification models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForImageClassification.from_pretrained('Xenova/vit-base-patch16-224');
 */
export class AutoModelForImageClassification extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained image segmentation models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForImageSegmentation.from_pretrained('Xenova/detr-resnet-50-panoptic');
 */
export class AutoModelForImageSegmentation extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained semantic segmentation models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForSemanticSegmentation.from_pretrained('nvidia/segformer-b3-finetuned-cityscapes-1024-1024');
 */
export class AutoModelForSemanticSegmentation extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained universal image segmentation models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForUniversalSegmentation.from_pretrained('hf-internal-testing/tiny-random-MaskFormerForInstanceSegmentation');
 */
export class AutoModelForUniversalSegmentation extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained object detection models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForObjectDetection.from_pretrained('Xenova/detr-resnet-50');
 */
export class AutoModelForObjectDetection extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained zero-shot object detection models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForZeroShotObjectDetection extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained mask generation models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 *
 * @example
 * let model = await AutoModelForMaskGeneration.from_pretrained('Xenova/sam-vit-base');
 */
export class AutoModelForMaskGeneration extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_MASK_GENERATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained CTC (connectionist temporal classification) models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForCTC extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_CTC_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained audio classification models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForAudioClassification extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained x-vector (speaker verification) models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForXVector extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained audio frame classification models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForAudioFrameClassification extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained document question answering models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForDocumentQuestionAnswering extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained image matting models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForImageMatting extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained image-to-image models (e.g. super-resolution) with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForImageToImage extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained depth estimation models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForDepthEstimation extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained surface normal estimation models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForNormalEstimation extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained pose estimation models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForPoseEstimation extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate image feature extraction models (e.g. the vision tower of CLIP-like models)
 * with the `from_pretrained` function. NOTE(review): this mapping is custom to Transformers.js (see the mapping definition).
 */
export class AutoModelForImageFeatureExtraction extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained image-text-to-text models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForImageTextToText extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES];
}

/**
 * Helper class which is used to instantiate pretrained audio-text-to-text models with the `from_pretrained` function.
 * The chosen model class is determined by the type specified in the model config.
 */
export class AutoModelForAudioTextToText extends PretrainedMixin {
    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_TEXT_TO_TEXT_MAPPING_NAMES];
}
//////////////////////////////////////////////////

//////////////////////////////////////////////////
export class Seq2SeqLMOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits The output logits of the model.
     * @param {Tensor} output.past_key_values A tensor of key/value pairs that represent the previous state of the model.
     * @param {Tensor} output.encoder_outputs The output of the encoder in a sequence-to-sequence model.
     * @param {Tensor} [output.decoder_attentions] Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
     * @param {Tensor} [output.cross_attentions] Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.
     */
    constructor({ logits, past_key_values, encoder_outputs, decoder_attentions = null, cross_attentions = null }) {
        super();
        this.logits = logits;
        this.past_key_values = past_key_values;
        this.encoder_outputs = encoder_outputs;
        this.decoder_attentions = decoder_attentions;
        this.cross_attentions = cross_attentions;
    }
}

/**
 * Base class for outputs of sentence classification models.
 */
export class SequenceClassifierOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits classification (or regression if config.num_labels==1) scores (before SoftMax).
     * @param {Record<string, Tensor>} [output.attentions] Object of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
     * Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
     */
    constructor({ logits, ...attentions }) {
        super();
        this.logits = logits;
        // Any remaining keys are treated as per-layer attention tensors.
        const attentions_list = Object.values(attentions);
        if (attentions_list.length > 0) {
            // Only set attentions if they are not empty
            this.attentions = attentions_list;
        }
    }
}

/**
 * Base class for outputs of XVector models.
 */
export class XVectorOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Classification hidden states before AMSoftmax, of shape `(batch_size, config.xvector_output_dim)`.
     * @param {Tensor} output.embeddings Utterance embeddings used for vector similarity-based retrieval, of shape `(batch_size, config.xvector_output_dim)`.
     */
    constructor({ logits, embeddings }) {
        super();
        this.logits = logits;
        this.embeddings = embeddings;
    }
}

/**
 * Base class for outputs of token classification models.
 */
export class TokenClassifierOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Classification scores (before SoftMax).
     */
    constructor({ logits }) {
        super();
        this.logits = logits;
    }
}

/**
 * Base class for masked language models outputs.
 */
export class MaskedLMOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
     */
    constructor({ logits }) {
        super();
        this.logits = logits;
    }
}

/**
 * Base class for outputs of question answering models.
 */
export class QuestionAnsweringModelOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.start_logits Span-start scores (before SoftMax).
     * @param {Tensor} output.end_logits Span-end scores (before SoftMax).
     */
    constructor({ start_logits, end_logits }) {
        super();
        this.start_logits = start_logits;
        this.end_logits = end_logits;
    }
}

/**
 * Base class for causal language model (or autoregressive) outputs.
 */
export class CausalLMOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before softmax).
     */
    constructor({ logits }) {
        super();
        this.logits = logits;
    }
}

/**
 * Base class for causal language model (or autoregressive) outputs that also carry the key/value cache
 * used to speed up sequential decoding.
 */
export class CausalLMOutputWithPast extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before softmax).
     * @param {Tensor} output.past_key_values Contains pre-computed hidden-states (key and values in the self-attention blocks)
     * that can be used (see `past_key_values` input) to speed up sequential decoding.
     */
    constructor({ logits, past_key_values }) {
        super();
        this.logits = logits;
        this.past_key_values = past_key_values;
    }
}

/**
 * Base class for outputs of image matting models.
 */
export class ImageMattingOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.alphas Estimated alpha values, of shape `(batch_size, num_channels, height, width)`.
     */
    constructor({ alphas }) {
        super();
        this.alphas = alphas;
    }
}

/**
 * Describes the outputs for the VITS model.
 */
export class VitsModelOutput extends ModelOutput {
    /**
     * @param {Object} output The output of the model.
     * @param {Tensor} output.waveform The final audio waveform predicted by the model, of shape `(batch_size, sequence_length)`.
     * @param {Tensor} output.spectrogram The log-mel spectrogram predicted at the output of the flow model.
     * This spectrogram is passed to the Hi-Fi GAN decoder model to obtain the final audio waveform.
     */
    constructor({ waveform, spectrogram }) {
        super();
        this.waveform = waveform;
        this.spectrogram = spectrogram;
    }
}
transformers.js/src/models.js/0
{ "file_path": "transformers.js/src/models.js", "repo_id": "transformers.js", "token_count": 127141 }
347
import {
    ImageProcessor,
} from "../../base/image_processors_utils.js";

/**
 * Image processor for DPT models.
 * All preprocessing behaviour is inherited unchanged from the base `ImageProcessor`;
 * this subclass exists so DPT configs resolve to a named processor class.
 */
export class DPTImageProcessor extends ImageProcessor { }

/**
 * Alias of `DPTImageProcessor` — presumably kept so configs that still reference the
 * older `FeatureExtractor` naming continue to load (TODO confirm against callers).
 */
export class DPTFeatureExtractor extends DPTImageProcessor { } // NOTE: extends DPTImageProcessor
transformers.js/src/models/dpt/image_processing_dpt.js/0
{ "file_path": "transformers.js/src/models/dpt/image_processing_dpt.js", "repo_id": "transformers.js", "token_count": 70 }
348
import { Processor } from "../../base/processing_utils.js"; import { AutoImageProcessor } from "../auto/image_processing_auto.js"; import { AutoTokenizer } from "../../tokenizers.js"; export class JinaCLIPProcessor extends Processor { static tokenizer_class = AutoTokenizer static image_processor_class = AutoImageProcessor async _call(text=null, images=null, kwargs = {}) { if (!text && !images){ throw new Error('Either text or images must be provided'); } const text_inputs = text ? this.tokenizer(text, kwargs) : {}; const image_inputs = images ? await this.image_processor(images, kwargs) : {}; return { ...text_inputs, ...image_inputs, } } }
transformers.js/src/models/jina_clip/processing_jina_clip.js/0
{ "file_path": "transformers.js/src/models/jina_clip/processing_jina_clip.js", "repo_id": "transformers.js", "token_count": 291 }
349
import { Processor } from "../../base/processing_utils.js";
import { AutoImageProcessor } from "../auto/image_processing_auto.js";
import { AutoTokenizer } from "../../tokenizers.js";

/**
 * Processor for OWL-ViT models: pairs a tokenizer (for text queries) with an
 * image processor. All call behaviour comes from the base `Processor`; only the
 * component classes are declared here.
 */
export class OwlViTProcessor extends Processor {
    // Component classes resolved by the base Processor when loading a checkpoint.
    static tokenizer_class = AutoTokenizer
    static image_processor_class = AutoImageProcessor
}
transformers.js/src/models/owlvit/processing_owlvit.js/0
{ "file_path": "transformers.js/src/models/owlvit/processing_owlvit.js", "repo_id": "transformers.js", "token_count": 95 }
350
import {
    ImageProcessor,
} from "../../base/image_processors_utils.js";

/**
 * Image processor for SigLIP models.
 * Inherits all preprocessing behaviour from the base `ImageProcessor`; this
 * subclass exists so SigLIP configs resolve to a named processor class.
 */
export class SiglipImageProcessor extends ImageProcessor { }
transformers.js/src/models/siglip/image_processing_siglip.js/0
{ "file_path": "transformers.js/src/models/siglip/image_processing_siglip.js", "repo_id": "transformers.js", "token_count": 44 }
351
// Languages supported by Whisper, as [language code, lowercase English name] pairs.
// Codes are mostly ISO 639-1 two-letter codes; "haw" (hawaiian) is three letters.
const WHISPER_LANGUAGES = [
    ["en", "english"],
    ["zh", "chinese"],
    ["de", "german"],
    ["es", "spanish"],
    ["ru", "russian"],
    ["ko", "korean"],
    ["fr", "french"],
    ["ja", "japanese"],
    ["pt", "portuguese"],
    ["tr", "turkish"],
    ["pl", "polish"],
    ["ca", "catalan"],
    ["nl", "dutch"],
    ["ar", "arabic"],
    ["sv", "swedish"],
    ["it", "italian"],
    ["id", "indonesian"],
    ["hi", "hindi"],
    ["fi", "finnish"],
    ["vi", "vietnamese"],
    ["he", "hebrew"],
    ["uk", "ukrainian"],
    ["el", "greek"],
    ["ms", "malay"],
    ["cs", "czech"],
    ["ro", "romanian"],
    ["da", "danish"],
    ["hu", "hungarian"],
    ["ta", "tamil"],
    ["no", "norwegian"],
    ["th", "thai"],
    ["ur", "urdu"],
    ["hr", "croatian"],
    ["bg", "bulgarian"],
    ["lt", "lithuanian"],
    ["la", "latin"],
    ["mi", "maori"],
    ["ml", "malayalam"],
    ["cy", "welsh"],
    ["sk", "slovak"],
    ["te", "telugu"],
    ["fa", "persian"],
    ["lv", "latvian"],
    ["bn", "bengali"],
    ["sr", "serbian"],
    ["az", "azerbaijani"],
    ["sl", "slovenian"],
    ["kn", "kannada"],
    ["et", "estonian"],
    ["mk", "macedonian"],
    ["br", "breton"],
    ["eu", "basque"],
    ["is", "icelandic"],
    ["hy", "armenian"],
    ["ne", "nepali"],
    ["mn", "mongolian"],
    ["bs", "bosnian"],
    ["kk", "kazakh"],
    ["sq", "albanian"],
    ["sw", "swahili"],
    ["gl", "galician"],
    ["mr", "marathi"],
    ["pa", "punjabi"],
    ["si", "sinhala"],
    ["km", "khmer"],
    ["sn", "shona"],
    ["yo", "yoruba"],
    ["so", "somali"],
    ["af", "afrikaans"],
    ["oc", "occitan"],
    ["ka", "georgian"],
    ["be", "belarusian"],
    ["tg", "tajik"],
    ["sd", "sindhi"],
    ["gu", "gujarati"],
    ["am", "amharic"],
    ["yi", "yiddish"],
    ["lo", "lao"],
    ["uz", "uzbek"],
    ["fo", "faroese"],
    ["ht", "haitian creole"],
    ["ps", "pashto"],
    ["tk", "turkmen"],
    ["nn", "nynorsk"],
    ["mt", "maltese"],
    ["sa", "sanskrit"],
    ["lb", "luxembourgish"],
    ["my", "myanmar"],
    ["bo", "tibetan"],
    ["tl", "tagalog"],
    ["mg", "malagasy"],
    ["as", "assamese"],
    ["tt", "tatar"],
    ["haw", "hawaiian"],
    ["ln", "lingala"],
    ["ha", "hausa"],
    ["ba", "bashkir"],
    ["jw", "javanese"],
    ["su", "sundanese"],
]

// Code -> name lookup (e.g., "en" -> "english").
// @ts-ignore
export const WHISPER_LANGUAGE_MAPPING = new Map(WHISPER_LANGUAGES);
// Name -> code lookup (e.g., "english" -> "en"), including alternative/legacy
// language names that map onto the same codes.
// @ts-ignore
export const WHISPER_TO_LANGUAGE_CODE_MAPPING = new Map([
    ...WHISPER_LANGUAGES.map(([k, v]) => [v, k]),
    ...[
        ["burmese", "my"],
        ["valencian", "ca"],
        ["flemish", "nl"],
        ["haitian", "ht"],
        ["letzeburgesch", "lb"],
        ["pushto", "ps"],
        ["panjabi", "pa"],
        ["moldavian", "ro"],
        ["moldovan", "ro"],
        ["sinhalese", "si"],
        ["castilian", "es"],
    ]
]);

/**
 * Normalize a user-supplied language identifier to a Whisper language code.
 * Accepts an English language name (e.g., "english"), a language code
 * (e.g., "en"), or the language special token itself (e.g., "<|en|>").
 * @param {string} language The language name or code
 * @returns {string} The language code
 * @throws {Error} If the language is not supported, listing the valid options.
 */
export function whisper_language_to_code(language) {
    language = language.toLowerCase();

    // Map to code from user-friendly name (e.g., "english" -> "en")
    let language_code = WHISPER_TO_LANGUAGE_CODE_MAPPING.get(language);

    if (language_code === undefined) {
        // User provided something that is not a language name

        // Perhaps the user passed the special token itself.
        // FIX: allow 2- and 3-letter codes — Whisper includes a three-letter
        // code ("haw"), whose special token "<|haw|>" a {2}-only pattern rejects.
        const language_special_token = language.match(/^<\|([a-z]{2,3})\|>$/);
        if (language_special_token) {
            language = language_special_token[1];
        }
        if (WHISPER_LANGUAGE_MAPPING.has(language)) {
            // User provided the language code directly (e.g., "en")
            language_code = language;
        } else {
            // User provided something that is not a language code or name
            const is_language_code = language.length === 2;
            const langs = is_language_code
                ? WHISPER_LANGUAGE_MAPPING.keys()
                : WHISPER_LANGUAGE_MAPPING.values();

            throw new Error(`Language "${language}" is not supported. Must be one of: ${JSON.stringify(Array.from(langs))}`);
        }
    }
    return language_code;
}
transformers.js/src/models/whisper/common_whisper.js/0
{ "file_path": "transformers.js/src/models/whisper/common_whisper.js", "repo_id": "transformers.js", "token_count": 1956 }
352
/** * @file Utility functions to interact with the Hugging Face Hub (https://huggingface.co/models) * * @module utils/hub */ import fs from 'node:fs'; import path from 'node:path'; import { apis, env } from '../env.js'; import { dispatchCallback } from './core.js'; /** * @typedef {boolean|number} ExternalData Whether to load the model using the external data format (used for models >= 2GB in size). * If `true`, the model will be loaded using the external data format. * If a number, this many chunks will be loaded using the external data format (of the form: "model.onnx_data[_{chunk_number}]"). */ export const MAX_EXTERNAL_DATA_CHUNKS = 100; /** * @typedef {Object} PretrainedOptions Options for loading a pretrained model. * @property {import('./core.js').ProgressCallback} [progress_callback=null] If specified, this function will be called during model construction, to provide the user with progress updates. * @property {import('../configs.js').PretrainedConfig} [config=null] Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when: * - The model is a model provided by the library (loaded with the *model id* string of a pretrained model). * - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory. * @property {string} [cache_dir=null] Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. * @property {boolean} [local_files_only=false] Whether or not to only look at local files (e.g., not try downloading the model). * @property {string} [revision='main'] The specific model version to use. It can be a branch name, a tag name, or a commit id, * since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. 
* NOTE: This setting is ignored for local requests. */ /** * @typedef {Object} ModelSpecificPretrainedOptions Options for loading a pretrained model. * @property {string} [subfolder='onnx'] In case the relevant files are located inside a subfolder of the model repo on huggingface.co, * you can specify the folder name here. * @property {string} [model_file_name=null] If specified, load the model with this name (excluding the .onnx suffix). Currently only valid for encoder- or decoder-only models. * @property {import("./devices.js").DeviceType|Record<string, import("./devices.js").DeviceType>} [device=null] The device to run the model on. If not specified, the device will be chosen from the environment settings. * @property {import("./dtypes.js").DataType|Record<string, import("./dtypes.js").DataType>} [dtype=null] The data type to use for the model. If not specified, the data type will be chosen from the environment settings. * @property {ExternalData|Record<string, ExternalData>} [use_external_data_format=false] Whether to load the model using the external data format (used for models >= 2GB in size). * @property {import('onnxruntime-common').InferenceSession.SessionOptions} [session_options] (Optional) User-specified session options passed to the runtime. If not provided, suitable defaults will be chosen. */ /** * @typedef {PretrainedOptions & ModelSpecificPretrainedOptions} PretrainedModelOptions Options for loading a pretrained model. */ /** * Mapping from file extensions to MIME types. */ const CONTENT_TYPE_MAP = { 'txt': 'text/plain', 'html': 'text/html', 'css': 'text/css', 'js': 'text/javascript', 'json': 'application/json', 'png': 'image/png', 'jpg': 'image/jpeg', 'jpeg': 'image/jpeg', 'gif': 'image/gif', } class FileResponse { /** * Creates a new `FileResponse` object. 
* @param {string} filePath */ constructor(filePath) { this.filePath = filePath; this.headers = new Headers(); this.exists = fs.existsSync(filePath); if (this.exists) { this.status = 200; this.statusText = 'OK'; let stats = fs.statSync(filePath); this.headers.set('content-length', stats.size.toString()); this.updateContentType(); const stream = fs.createReadStream(filePath); this.body = new ReadableStream({ start(controller) { stream.on('data', (chunk) => controller.enqueue(chunk)); stream.on('end', () => controller.close()); stream.on('error', (err) => controller.error(err)); }, cancel() { stream.destroy(); } }); } else { this.status = 404; this.statusText = 'Not Found'; this.body = null; } } /** * Updates the 'content-type' header property of the response based on the extension of * the file specified by the filePath property of the current object. * @returns {void} */ updateContentType() { // Set content-type header based on file extension const extension = this.filePath.toString().split('.').pop().toLowerCase(); this.headers.set('content-type', CONTENT_TYPE_MAP[extension] ?? 'application/octet-stream'); } /** * Clone the current FileResponse object. * @returns {FileResponse} A new FileResponse object with the same properties as the current object. */ clone() { let response = new FileResponse(this.filePath); response.exists = this.exists; response.status = this.status; response.statusText = this.statusText; response.headers = new Headers(this.headers); return response; } /** * Reads the contents of the file specified by the filePath property and returns a Promise that * resolves with an ArrayBuffer containing the file's contents. * @returns {Promise<ArrayBuffer>} A Promise that resolves with an ArrayBuffer containing the file's contents. * @throws {Error} If the file cannot be read. 
*/ async arrayBuffer() { const data = await fs.promises.readFile(this.filePath); return /** @type {ArrayBuffer} */ (data.buffer); } /** * Reads the contents of the file specified by the filePath property and returns a Promise that * resolves with a Blob containing the file's contents. * @returns {Promise<Blob>} A Promise that resolves with a Blob containing the file's contents. * @throws {Error} If the file cannot be read. */ async blob() { const data = await fs.promises.readFile(this.filePath); return new Blob([data], { type: this.headers.get('content-type') }); } /** * Reads the contents of the file specified by the filePath property and returns a Promise that * resolves with a string containing the file's contents. * @returns {Promise<string>} A Promise that resolves with a string containing the file's contents. * @throws {Error} If the file cannot be read. */ async text() { const data = await fs.promises.readFile(this.filePath, 'utf8'); return data; } /** * Reads the contents of the file specified by the filePath property and returns a Promise that * resolves with a parsed JavaScript object containing the file's contents. * * @returns {Promise<Object>} A Promise that resolves with a parsed JavaScript object containing the file's contents. * @throws {Error} If the file cannot be read. */ async json() { return JSON.parse(await this.text()); } } /** * Determines whether the given string is a valid URL. * @param {string|URL} string The string to test for validity as an URL. * @param {string[]} [protocols=null] A list of valid protocols. If specified, the protocol must be in this list. * @param {string[]} [validHosts=null] A list of valid hostnames. If specified, the URL's hostname must be in this list. * @returns {boolean} True if the string is a valid URL, false otherwise. 
*/
function isValidUrl(string, protocols = null, validHosts = null) {
    let url;
    try {
        url = new URL(string);
    } catch (_) {
        // Not parseable as a URL at all.
        return false;
    }
    if (protocols && !protocols.includes(url.protocol)) {
        return false;
    }
    if (validHosts && !validHosts.includes(url.hostname)) {
        return false;
    }
    return true;
}

// Matches an optional "namespace/" prefix followed by a repo name of 1-96
// word/dash/dot characters (mirrors the Hub's repo-id constraints).
const REPO_ID_REGEX = /^(\b[\w\-.]+\b\/)?\b[\w\-.]{1,96}\b$/;

/**
 * Tests whether a string is a valid Hugging Face model ID or not.
 * Adapted from https://github.com/huggingface/huggingface_hub/blob/6378820ebb03f071988a96c7f3268f5bdf8f9449/src/huggingface_hub/utils/_validators.py#L119-L170
 *
 * @param {string} string The string to test
 * @returns {boolean} True if the string is a valid model ID, false otherwise.
 */
function isValidHfModelId(string) {
    if (!REPO_ID_REGEX.test(string)) return false;
    if (string.includes("..") || string.includes("--")) return false;
    if (string.endsWith(".git") || string.endsWith(".ipynb")) return false;
    return true;
}

/**
 * Helper function to get a file, using either the Fetch API or FileSystem API.
 *
 * @param {URL|string} urlOrPath The URL/path of the file to get.
 * @returns {Promise<FileResponse|Response>} A promise that resolves to a FileResponse object (if the file is retrieved using the FileSystem API), or a Response object (if the file is retrieved using the Fetch API).
 */
export async function getFile(urlOrPath) {

    if (env.useFS && !isValidUrl(urlOrPath, ["http:", "https:", "blob:"])) {
        // Not a web URL: read from the local file system instead of fetching.
        return new FileResponse(
            urlOrPath instanceof URL
                ? urlOrPath.protocol === "file:" ? urlOrPath.pathname : urlOrPath.toString()
                : urlOrPath,
        );
    } else if (typeof process !== 'undefined' && process?.release?.name === 'node') {
        const IS_CI = !!process.env?.TESTING_REMOTELY;
        const version = env.version;

        const headers = new Headers();
        headers.set('User-Agent', `transformers.js/${version}; is_ci/${IS_CI};`);

        // Check whether we are making a request to the Hugging Face Hub.
        const isHFURL = isValidUrl(urlOrPath, ['http:', 'https:'], ['huggingface.co', 'hf.co']);
        if (isHFURL) {
            // If an access token is present in the environment variables,
            // we add it to the request headers.
            // NOTE: We keep `HF_ACCESS_TOKEN` for backwards compatibility (as a fallback).
            const token = process.env?.HF_TOKEN ?? process.env?.HF_ACCESS_TOKEN;
            if (token) {
                headers.set('Authorization', `Bearer ${token}`);
            }
        }
        return fetch(urlOrPath, { headers });
    } else {
        // Running in a browser-environment, so we use default headers
        // NOTE: We do not allow passing authorization headers in the browser,
        // since this would require exposing the token to the client.
        return fetch(urlOrPath);
    }
}

const ERROR_MAPPING = {
    // 4xx errors (https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#client_error_responses)
    400: 'Bad request error occurred while trying to load file',
    401: 'Unauthorized access to file',
    403: 'Forbidden access to file',
    404: 'Could not locate file',
    408: 'Request timeout error occurred while trying to load file',

    // 5xx errors (https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#server_error_responses)
    500: 'Internal server error error occurred while trying to load file',
    502: 'Bad gateway error occurred while trying to load file',
    503: 'Service unavailable error occurred while trying to load file',
    504: 'Gateway timeout error occurred while trying to load file',
}

/**
 * Helper method to handle fatal errors that occur while trying to load a file from the Hugging Face Hub.
 * @param {number} status The HTTP status code of the error.
 * @param {string} remoteURL The URL of the file that could not be loaded.
 * @param {boolean} fatal Whether to raise an error if the file could not be loaded.
 * @returns {null} Returns `null` if `fatal = false` (the file was optional).
 * @throws {Error} If `fatal = true`.
 */
function handleError(status, remoteURL, fatal) {
    if (!fatal) {
        // File was not loaded correctly, but it is optional.
        // TODO in future, cache the response?
        return null;
    }

    const message = ERROR_MAPPING[status] ?? `Error (${status}) occurred while trying to load file`;
    throw Error(`${message}: "${remoteURL}".`);
}

/**
 * A minimal, file-system-backed stand-in for the Web Cache API, used when
 * running in environments (e.g. Node.js) where `caches` is unavailable.
 */
class FileCache {
    /**
     * Instantiate a `FileCache` object.
     * @param {string} path Root directory under which cached entries are stored.
     */
    constructor(path) {
        this.path = path;
    }

    /**
     * Checks whether the given request is in the cache.
     * @param {string} request
     * @returns {Promise<FileResponse | undefined>}
     */
    async match(request) {
        let filePath = path.join(this.path, request);
        let file = new FileResponse(filePath);

        if (file.exists) {
            return file;
        } else {
            return undefined;
        }
    }

    /**
     * Adds the given response to the cache.
     * @param {string} request
     * @param {Response} response
     * @param {(data: {progress: number, loaded: number, total: number}) => void} [progress_callback] Optional.
     * The function to call with progress updates
     * @returns {Promise<void>}
     */
    async put(request, response, progress_callback = undefined) {
        let filePath = path.join(this.path, request);

        try {
            const contentLength = response.headers.get('Content-Length');
            const total = parseInt(contentLength ?? '0');
            let loaded = 0;

            await fs.promises.mkdir(path.dirname(filePath), { recursive: true });
            const fileStream = fs.createWriteStream(filePath);

            // Stream the response body to disk chunk-by-chunk so large files
            // are never fully buffered in memory.
            const reader = response.body.getReader();
            while (true) {
                const { done, value } = await reader.read();
                if (done) {
                    break;
                }

                await new Promise((resolve, reject) => {
                    fileStream.write(value, (err) => {
                        if (err) {
                            reject(err);
                            return;
                        }
                        resolve();
                    });
                });

                loaded += value.length;
                // If Content-Length was missing, `total` is 0 and progress is reported as 0.
                const progress = total ? (loaded / total) * 100 : 0;

                progress_callback?.({
                    progress,
                    loaded,
                    total,
                });
            }

            fileStream.close();
        } catch (error) {
            // Clean up the file if an error occurred during download
            try {
                await fs.promises.unlink(filePath);
            } catch { }
            throw error;
        }
    }

    // TODO add the rest?
    // addAll(requests: RequestInfo[]): Promise<void>;
    // delete(request: RequestInfo | URL, options?: CacheQueryOptions): Promise<boolean>;
    // keys(request?: RequestInfo | URL, options?: CacheQueryOptions): Promise<ReadonlyArray<Request>>;
    // match(request: RequestInfo | URL, options?: CacheQueryOptions): Promise<Response | undefined>;
    // matchAll(request?: RequestInfo | URL, options?: CacheQueryOptions): Promise<ReadonlyArray<Response>>;
}

/**
 * Attempts to find an item in the cache under any of the given names, returning the
 * first hit. Lookup errors for an individual name are swallowed (some cache backends
 * reject non-URL keys) and the next name is tried.
 * @param {FileCache|Cache} cache The cache to search
 * @param {string[]} names The names of the item to search for
 * @returns {Promise<FileResponse|Response|undefined>} The item from the cache, or undefined if not found.
 */
async function tryCache(cache, ...names) {
    for (let name of names) {
        try {
            let result = await cache.match(name);
            if (result) return result;
        } catch (e) {
            continue;
        }
    }
    return undefined;
}

/**
 * Retrieves a file from either a remote URL using the Fetch API or from the local file system using the FileSystem API.
 * If the filesystem is available and `env.useCache = true`, the file will be downloaded and cached.
 *
 * @param {string} path_or_repo_id This can be either:
 * - a string, the *model id* of a model repo on huggingface.co.
 * - a path to a *directory* potentially containing the file.
 * @param {string} filename The name of the file to locate in `path_or_repo`.
 * @param {boolean} [fatal=true] Whether to throw an error if the file is not found.
 * @param {PretrainedOptions} [options] An object containing optional parameters.
 * @param {boolean} [return_path=false] Whether to return the path of the file instead of the file content.
 *
 * @throws Will throw an error if the file is not found and `fatal` is true.
 * @returns {Promise<string|Uint8Array|null>} A Promise that resolves with the file content as a Uint8Array if `return_path` is false, or the file path as a string if `return_path` is true. Resolves with `null` if the file could not be found and `fatal` is false.
*/
export async function getModelFile(path_or_repo_id, filename, fatal = true, options = {}, return_path = false) {

    if (!env.allowLocalModels) {
        // User has disabled local models, so we just make sure other settings are correct.
        if (options.local_files_only) {
            throw Error("Invalid configuration detected: local models are disabled (`env.allowLocalModels=false`) but you have requested to only use local models (`local_files_only=true`).")
        } else if (!env.allowRemoteModels) {
            throw Error("Invalid configuration detected: both local and remote models are disabled. Fix by setting `env.allowLocalModels` or `env.allowRemoteModels` to `true`.")
        }
    }

    // Initiate file retrieval
    dispatchCallback(options.progress_callback, {
        status: 'initiate',
        name: path_or_repo_id,
        file: filename
    })

    // First, check if the a caching backend is available
    // If no caching mechanism available, will download the file every time
    // Backend priority: custom cache > browser cache > filesystem cache.
    let cache;
    if (!cache && env.useCustomCache) {
        // Allow the user to specify a custom cache system.
        if (!env.customCache) {
            throw Error('`env.useCustomCache=true`, but `env.customCache` is not defined.')
        }

        // Check that the required methods are defined:
        if (!env.customCache.match || !env.customCache.put) {
            throw new Error(
                "`env.customCache` must be an object which implements the `match` and `put` functions of the Web Cache API. " +
                "For more information, see https://developer.mozilla.org/en-US/docs/Web/API/Cache"
            )
        }
        cache = env.customCache;
    }

    if (!cache && env.useBrowserCache) {
        if (typeof caches === 'undefined') {
            throw Error('Browser cache is not available in this environment.')
        }
        try {
            // In some cases, the browser cache may be visible, but not accessible due to security restrictions.
            // For example, when running an application in an iframe, if a user attempts to load the page in
            // incognito mode, the following error is thrown: `DOMException: Failed to execute 'open' on 'CacheStorage':
            // An attempt was made to break through the security policy of the user agent.`
            // So, instead of crashing, we just ignore the error and continue without using the cache.
            cache = await caches.open('transformers-cache');
        } catch (e) {
            console.warn('An error occurred while opening the browser cache:', e);
        }
    }

    if (!cache && env.useFSCache) {
        if (!apis.IS_FS_AVAILABLE) {
            throw Error('File System Cache is not available in this environment.');
        }

        // If `cache_dir` is not specified, use the default cache directory
        cache = new FileCache(options.cache_dir ?? env.cacheDir);
    }

    const revision = options.revision ?? 'main';
    const requestURL = pathJoin(path_or_repo_id, filename);

    const validModelId = isValidHfModelId(path_or_repo_id);
    // If the repo id is not a valid Hub model id, treat it as a raw path/URL.
    const localPath = validModelId
        ? pathJoin(env.localModelPath, requestURL)
        : requestURL;
    const remoteURL = pathJoin(
        env.remoteHost,
        env.remotePathTemplate
            .replaceAll('{model}', path_or_repo_id)
            .replaceAll('{revision}', encodeURIComponent(revision)),
        filename
    );

    /** @type {string} */
    let cacheKey;
    const proposedCacheKey = cache instanceof FileCache
        // Choose cache key for filesystem cache
        // When using the main revision (default), we use the request URL as the cache key.
        // If a specific revision is requested, we account for this in the cache key.
        ? revision === 'main' ? requestURL : pathJoin(path_or_repo_id, revision, filename)
        : remoteURL;

    // Whether to cache the final response in the end.
    let toCacheResponse = false;

    /** @type {Response|FileResponse|undefined} */
    let response;

    if (cache) {
        // A caching system is available, so we try to get the file from it.
        //  1. We first try to get from cache using the local path. In some environments (like deno),
        //     non-URL cache keys are not allowed. In these cases, `response` will be undefined.
        //  2. If no response is found, we try to get from cache using the remote URL or file system cache.
        response = await tryCache(cache, localPath, proposedCacheKey);
    }

    const cacheHit = response !== undefined;

    if (response === undefined) {
        // Caching not available, or file is not cached, so we perform the request

        if (env.allowLocalModels) {
            // Accessing local models is enabled, so we try to get the file locally.
            // If request is a valid HTTP URL, we skip the local file check. Otherwise, we try to get the file locally.
            const isURL = isValidUrl(requestURL, ['http:', 'https:']);
            if (!isURL) {
                try {
                    response = await getFile(localPath);
                    cacheKey = localPath; // Update the cache key to be the local path
                } catch (e) {
                    // Something went wrong while trying to get the file locally.
                    // NOTE: error handling is done in the next step (since `response` will be undefined)
                    console.warn(`Unable to load from local path "${localPath}": "${e}"`);
                }
            } else if (options.local_files_only) {
                throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${requestURL}.`);
            } else if (!env.allowRemoteModels) {
                throw new Error(`\`env.allowRemoteModels=false\`, but attempted to load a remote file from: ${requestURL}.`);
            }
        }

        if (response === undefined || response.status === 404) {
            // File not found locally. This means either:
            // - The user has disabled local file access (`env.allowLocalModels=false`)
            // - the path is a valid HTTP url (`response === undefined`)
            // - the path is not a valid HTTP url and the file is not present on the file system or local server (`response.status === 404`)

            if (options.local_files_only || !env.allowRemoteModels) {
                // User requested local files only, but the file is not found locally.
                if (fatal) {
                    throw Error(`\`local_files_only=true\` or \`env.allowRemoteModels=false\` and file was not found locally at "${localPath}".`);
                } else {
                    // File not found, but this file is optional.
                    // TODO in future, cache the response?
                    return null;
                }
            }
            if (!validModelId) {
                // Before making any requests to the remote server, we check if the model ID is valid.
                // This prevents unnecessary network requests for invalid model IDs.
                throw Error(`Local file missing at "${localPath}" and download aborted due to invalid model ID "${path_or_repo_id}".`);
            }

            // File not found locally, so we try to download it from the remote server
            response = await getFile(remoteURL);

            if (response.status !== 200) {
                // Returns null (when fatal=false) or throws (when fatal=true).
                return handleError(response.status, remoteURL, fatal);
            }

            // Success! We use the proposed cache key from earlier
            cacheKey = proposedCacheKey;
        }

        // Only cache the response if:
        toCacheResponse =
            cache // 1. A caching system is available
            && typeof Response !== 'undefined' // 2. `Response` is defined (i.e., we are in a browser-like environment)
            && response instanceof Response // 3. result is a `Response` object (i.e., not a `FileResponse`)
            && response.status === 200 // 4. request was successful (status code 200)
    }

    // Start downloading
    dispatchCallback(options.progress_callback, {
        status: 'download',
        name: path_or_repo_id,
        file: filename
    })

    let result;
    // In Node with `return_path=true`, we skip reading the body entirely and
    // resolve to a file path further below.
    if (!(apis.IS_NODE_ENV && return_path)) {
        /** @type {Uint8Array} */
        let buffer;

        if (!options.progress_callback) {
            // If no progress callback is specified, we can use the `.arrayBuffer()`
            // method to read the response.
            buffer = new Uint8Array(await response.arrayBuffer());
        } else if (
            cacheHit // The item is being read from the cache
            && typeof navigator !== 'undefined' && /firefox/i.test(navigator.userAgent) // We are in Firefox
        ) {
            // Due to bug in Firefox, we cannot display progress when loading from cache.
            // Fortunately, since this should be instantaneous, this should not impact users too much.
            buffer = new Uint8Array(await response.arrayBuffer());

            // For completeness, we still fire the final progress callback
            dispatchCallback(options.progress_callback, {
                status: 'progress',
                name: path_or_repo_id,
                file: filename,
                progress: 100,
                loaded: buffer.length,
                total: buffer.length,
            })
        } else {
            buffer = await readResponse(response, data => {
                dispatchCallback(options.progress_callback, {
                    status: 'progress',
                    name: path_or_repo_id,
                    file: filename,
                    ...data,
                })
            })
        }
        result = buffer;
    }

    if (
        // Only cache web responses
        // i.e., do not cache FileResponses (prevents duplication)
        toCacheResponse && cacheKey
        &&
        // Check again whether request is in cache. If not, we add the response to the cache
        (await cache.match(cacheKey) === undefined)
    ) {
        if (!result) {
            // We haven't yet read the response body, so we need to do so now.
            await cache.put(cacheKey, /** @type {Response} */(response), options.progress_callback);
        } else {
            // NOTE: We use `new Response(buffer, ...)` instead of `response.clone()` to handle LFS files
            await cache.put(cacheKey, new Response(result, { headers: response.headers }))
                .catch(err => {
                    // Do not crash if unable to add to cache (e.g., QuotaExceededError).
                    // Rather, log a warning and proceed with execution.
                    console.warn(`Unable to add response to browser cache: ${err}.`);
                });
        }
    }

    dispatchCallback(options.progress_callback, {
        status: 'done',
        name: path_or_repo_id,
        file: filename
    });

    if (result) {
        if (!apis.IS_NODE_ENV && return_path) {
            throw new Error("Cannot return path in a browser environment.")
        }
        return result;
    }
    if (response instanceof FileResponse) {
        return response.filePath;
    }

    // Otherwise, return the cached response (most likely a `FileResponse`).
    // NOTE: A custom cache may return a Response, or a string (file path)
    const cachedResponse = await cache?.match(cacheKey);
    if (cachedResponse instanceof FileResponse) {
        return cachedResponse.filePath;
    } else if (cachedResponse instanceof Response) {
        return new Uint8Array(await cachedResponse.arrayBuffer());
    } else if (typeof cachedResponse === 'string') {
        return cachedResponse;
    }
    throw new Error("Unable to get model file path or buffer.");
}

/**
 * Fetches a text file from a given path and file name.
 *
 * @param {string} modelPath The path to the directory containing the file.
 * @param {string} fileName The name of the file to fetch.
 * @param {boolean} [fatal=true] Whether to throw an error if the file is not found.
 * @param {PretrainedOptions} [options] An object containing optional parameters.
 * @returns {Promise<string|null>} The text content of the file.
 * @throws Will throw an error if the file is not found and `fatal` is true.
 */
export async function getModelText(modelPath, fileName, fatal = true, options = {}) {
    const buffer = await getModelFile(modelPath, fileName, fatal, options, false);
    if (buffer === null) {
        // Optional file was not found; propagate the null.
        return null;
    }

    const decoder = new TextDecoder('utf-8');
    return decoder.decode(/** @type {Uint8Array} */(buffer));
}

/**
 * Fetches a JSON file from a given path and file name.
 *
 * @param {string} modelPath The path to the directory containing the file.
 * @param {string} fileName The name of the file to fetch.
 * @param {boolean} [fatal=true] Whether to throw an error if the file is not found.
 * @param {PretrainedOptions} [options] An object containing optional parameters.
 * @returns {Promise<Object>} The JSON data parsed into a JavaScript object.
 * @throws Will throw an error if the file is not found and `fatal` is true.
*/
export async function getModelJSON(modelPath, fileName, fatal = true, options = {}) {
    const text = await getModelText(modelPath, fileName, fatal, options);
    if (text === null) {
        // Return empty object
        return {};
    }

    return JSON.parse(text);
}

/**
 * Read and track progress when reading a Response object
 *
 * @param {Response|FileResponse} response The Response object to read
 * @param {(data: {progress: number, loaded: number, total: number}) => void} progress_callback The function to call with progress updates
 * @returns {Promise<Uint8Array>} A Promise that resolves with the Uint8Array buffer
 */
async function readResponse(response, progress_callback) {

    const contentLength = response.headers.get('Content-Length');
    if (contentLength === null) {
        console.warn('Unable to determine content-length from response headers. Will expand buffer when needed.')
    }
    let total = parseInt(contentLength ?? '0');
    let buffer = new Uint8Array(total);
    let loaded = 0;

    const reader = response.body.getReader();
    // Recursively pull chunks from the stream, growing `buffer` whenever the
    // declared Content-Length was missing or wrong.
    async function read() {
        const { done, value } = await reader.read();
        if (done) return;

        const newLoaded = loaded + value.length;
        if (newLoaded > total) {
            total = newLoaded;

            // Adding the new data will overflow buffer.
            // In this case, we extend the buffer
            const newBuffer = new Uint8Array(total);

            // copy contents
            newBuffer.set(buffer);
            buffer = newBuffer;
        }
        buffer.set(value, loaded);
        loaded = newLoaded;

        const progress = (loaded / total) * 100;

        // Report progress to the caller after each chunk.
        progress_callback({
            progress,
            loaded,
            total,
        });

        return read();
    }

    // Actually read
    await read();

    return buffer;
}

/**
 * Joins multiple parts of a path into a single path, while handling leading and trailing slashes.
 *
 * @param {...string} parts Multiple parts of a path.
 * @returns {string} A string representing the joined path.
*/ function pathJoin(...parts) { // https://stackoverflow.com/a/55142565 parts = parts.map((part, index) => { if (index) { part = part.replace(new RegExp('^/'), ''); } if (index !== parts.length - 1) { part = part.replace(new RegExp('/$'), ''); } return part; }) return parts.join('/'); }
transformers.js/src/utils/hub.js/0
{ "file_path": "transformers.js/src/utils/hub.js", "repo_id": "transformers.js", "token_count": 12319 }
353
// Unit tests for the BERT model family. Each suite loads a tiny random
// checkpoint (hf-internal-testing) plus its tokenizer, runs a forward pass for
// batch sizes 1 and >1, and pins the output tensor dimensions and mean values
// against previously recorded references.
import { BertTokenizer, BertModel, BertForMaskedLM, BertForSequenceClassification, BertForTokenClassification, BertForQuestionAnswering } from "../../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";

export default () => {
  // Base encoder: checks `last_hidden_state` shape and mean.
  describe("BertModel", () => {
    const model_id = "hf-internal-testing/tiny-random-BertModel";

    /** @type {BertModel} */
    let model;
    /** @type {BertTokenizer} */
    let tokenizer;
    beforeAll(async () => {
      model = await BertModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      tokenizer = await BertTokenizer.from_pretrained(model_id);
    }, MAX_MODEL_LOAD_TIME);

    it(
      "batch_size=1",
      async () => {
        const inputs = tokenizer("hello");
        const { last_hidden_state } = await model(inputs);
        expect(last_hidden_state.dims).toEqual([1, 7, 32]);
        expect(last_hidden_state.mean().item()).toBeCloseTo(0.0, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "batch_size>1",
      async () => {
        const inputs = tokenizer(["hello", "hello world"], { padding: true });
        const { last_hidden_state } = await model(inputs);
        expect(last_hidden_state.dims).toEqual([2, 12, 32]);
        expect(last_hidden_state.mean().item()).toBeCloseTo(1.4901161193847656e-8, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });

  // Masked-LM head: checks `logits` shape and mean for [MASK] inputs.
  describe("BertForMaskedLM", () => {
    const model_id = "hf-internal-testing/tiny-random-BertForMaskedLM";

    const texts = ["The goal of life is [MASK].", "Paris is the [MASK] of France."];

    /** @type {BertForMaskedLM} */
    let model;
    /** @type {BertTokenizer} */
    let tokenizer;
    beforeAll(async () => {
      model = await BertForMaskedLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      tokenizer = await BertTokenizer.from_pretrained(model_id);
    }, MAX_MODEL_LOAD_TIME);

    it(
      "batch_size=1",
      async () => {
        const inputs = tokenizer(texts[0]);
        const { logits } = await model(inputs);
        expect(logits.dims).toEqual([1, 19, 1124]);
        expect(logits.mean().item()).toBeCloseTo(0.0016587056452408433, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "batch_size>1",
      async () => {
        const inputs = tokenizer(texts, { padding: true });
        const { logits } = await model(inputs);
        expect(logits.dims).toEqual([2, 22, 1124]);
        expect(logits.mean().item()).toBeCloseTo(0.0017160633578896523, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });

  // Sequence-classification head: checks exact per-class logits.
  describe("BertForSequenceClassification", () => {
    const model_id = "hf-internal-testing/tiny-random-BertForSequenceClassification";

    /** @type {BertForSequenceClassification} */
    let model;
    /** @type {BertTokenizer} */
    let tokenizer;
    beforeAll(async () => {
      model = await BertForSequenceClassification.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      tokenizer = await BertTokenizer.from_pretrained(model_id);
    }, MAX_MODEL_LOAD_TIME);

    it(
      "batch_size=1",
      async () => {
        const inputs = tokenizer("hello");
        const { logits } = await model(inputs);
        const target = [[0.00043986947275698185, -0.030218850821256638]];
        expect(logits.dims).toEqual([1, 2]);
        expect(logits.tolist()).toBeCloseToNested(target, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "batch_size>1",
      async () => {
        const inputs = tokenizer(["hello", "hello world"], { padding: true });
        const { logits } = await model(inputs);
        const target = [
          [0.00043986947275698185, -0.030218850821256638],
          [0.0003853091038763523, -0.03022204339504242],
        ];
        expect(logits.dims).toEqual([2, 2]);
        expect(logits.tolist()).toBeCloseToNested(target, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });

  // Token-classification head: checks per-token `logits` shape and mean.
  describe("BertForTokenClassification", () => {
    const model_id = "hf-internal-testing/tiny-random-BertForTokenClassification";

    /** @type {BertForTokenClassification} */
    let model;
    /** @type {BertTokenizer} */
    let tokenizer;
    beforeAll(async () => {
      model = await BertForTokenClassification.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      tokenizer = await BertTokenizer.from_pretrained(model_id);
    }, MAX_MODEL_LOAD_TIME);

    it(
      "batch_size=1",
      async () => {
        const inputs = tokenizer("hello");
        const { logits } = await model(inputs);
        expect(logits.dims).toEqual([1, 7, 2]);
        expect(logits.mean().item()).toBeCloseTo(0.07089076191186905, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "batch_size>1",
      async () => {
        const inputs = tokenizer(["hello", "hello world"], { padding: true });
        const { logits } = await model(inputs);
        expect(logits.dims).toEqual([2, 12, 2]);
        expect(logits.mean().item()).toBeCloseTo(0.04702216014266014, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });

  // Question-answering head: checks `start_logits` and `end_logits`.
  describe("BertForQuestionAnswering", () => {
    const model_id = "hf-internal-testing/tiny-random-BertForQuestionAnswering";

    /** @type {BertForQuestionAnswering} */
    let model;
    /** @type {BertTokenizer} */
    let tokenizer;
    beforeAll(async () => {
      model = await BertForQuestionAnswering.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      tokenizer = await BertTokenizer.from_pretrained(model_id);
    }, MAX_MODEL_LOAD_TIME);

    it(
      "batch_size=1",
      async () => {
        const inputs = tokenizer("hello");
        const { start_logits, end_logits } = await model(inputs);
        expect(start_logits.dims).toEqual([1, 7]);
        expect(start_logits.mean().item()).toBeCloseTo(0.12772157788276672, 5);
        expect(end_logits.dims).toEqual([1, 7]);
        expect(end_logits.mean().item()).toBeCloseTo(0.11811424791812897, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "batch_size>1",
      async () => {
        const inputs = tokenizer(["hello", "hello world"], { padding: true });
        const { start_logits, end_logits } = await model(inputs);
        expect(start_logits.dims).toEqual([2, 12]);
        expect(start_logits.mean().item()).toBeCloseTo(0.12843115627765656, 5);
        expect(end_logits.dims).toEqual([2, 12]);
        expect(end_logits.mean().item()).toBeCloseTo(0.11745202541351318, 5);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });
};
transformers.js/tests/models/bert/test_modeling_bert.js/0
{ "file_path": "transformers.js/tests/models/bert/test_modeling_bert.js", "repo_id": "transformers.js", "token_count": 3074 }
354
// Unit tests for the SAM image processor: verifies padding to 1024x1024 and the
// rescaling of user-supplied prompt points/labels/boxes from original image
// coordinates into the processor's reshaped input coordinates.
import { AutoImageProcessor, SamImageProcessor } from "../../../src/transformers.js";

import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";

export default () => {
  // SamImageProcessor
  //  - tests normal padding (do_pad=true, pad_size={"height":1024,"width":1024})
  //  - In addition to the image, pass in a list of points
  describe("SamImageProcessor", () => {
    const model_id = "Xenova/sam-vit-base";

    /** @type {SamImageProcessor} */
    let processor;
    beforeAll(async () => {
      processor = await AutoImageProcessor.from_pretrained(model_id);
    }, MAX_PROCESSOR_LOAD_TIME);

    it(
      "without input points",
      async () => {
        const image = await load_cached_image("pattern_3x3");
        const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
        expect(pixel_values.dims).toEqual([1, 3, 1024, 1024]);
        expect(pixel_values.mean().item()).toBeCloseTo(-0.4505715670146813, 6);

        expect(original_sizes).toEqual([[3, 3]]);
        expect(reshaped_input_sizes).toEqual([[1024, 1024]]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "with input points",
      async () => {
        const image = await load_cached_image("pattern_3x3");

        // Points are scaled from 3x3 image coordinates to the 1024x1024 input.
        const { original_sizes, reshaped_input_sizes, input_points } = await processor(image, {
          input_points: [[[1, 2]]],
        });

        expect(original_sizes).toEqual([[3, 3]]);
        expect(reshaped_input_sizes).toEqual([[1024, 1024]]);
        expect(input_points.tolist()).toBeCloseToNested([[[[341.3333, 682.6667]]]], 4);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "multiple points with labels",
      async () => {
        const image = await load_cached_image("pattern_3x3");

        const { original_sizes, reshaped_input_sizes, input_points, input_labels } = await processor(image, {
          input_points: [
            [
              [1, 2],
              [2, 1],
            ],
          ],
          input_labels: [[1, 0]],
        });

        expect(original_sizes).toEqual([[3, 3]]);
        expect(reshaped_input_sizes).toEqual([[1024, 1024]]);
        expect(input_points.tolist()).toBeCloseToNested(
          [
            [
              [
                [341.3333, 682.6667],
                [682.6667, 341.3333],
              ],
            ],
          ],
          4,
        );
        // Labels come back as a BigInt tensor (hence the `n` suffix).
        expect(input_labels.tolist()).toEqual([[[1n, 0n]]]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "with input boxes",
      async () => {
        const image = await load_cached_image("pattern_3x3");

        const { original_sizes, reshaped_input_sizes, input_boxes } = await processor(image, {
          input_boxes: [[[0, 1, 2, 2]]],
        });

        expect(original_sizes).toEqual([[3, 3]]);
        expect(reshaped_input_sizes).toEqual([[1024, 1024]]);
        expect(input_boxes.tolist()).toBeCloseToNested([[[0, 341.3333, 682.6667, 682.6667]]], 4);
      },
      MAX_TEST_EXECUTION_TIME,
    );
  });
};
transformers.js/tests/models/sam/test_image_processing_sam.js/0
{ "file_path": "transformers.js/tests/models/sam/test_image_processing_sam.js", "repo_id": "transformers.js", "token_count": 1475 }
355
import { WhisperTokenizer } from "../../../src/tokenizers.js";

import { BASE_TEST_STRINGS, WHISPER_TEST_STRINGS } from "../test_strings.js";

import { compare } from "../../test_utils.js";

export const TOKENIZER_CLASS = WhisperTokenizer;

// Golden tokenization data, keyed by Hugging Face model id.
// For each input string: `tokens` are the byte-level BPE token strings,
// `ids` the corresponding token ids (including the special-token prefix/suffix
// visible in `decoded`), and `decoded` the round-tripped string with special tokens.
export const TEST_CONFIG = {
  // English-only model: decoded prefix is <|startoftranscript|><|notimestamps|>.
  "onnx-community/whisper-tiny.en": {
    SIMPLE: {
      text: BASE_TEST_STRINGS.SIMPLE,
      tokens: ["How", "\u0120are", "\u0120you", "\u0120doing", "?"],
      ids: [50257, 50362, 2437, 389, 345, 1804, 30, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>How are you doing?<|endoftext|>",
    },
    SIMPLE_WITH_PUNCTUATION: {
      text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION,
      tokens: ["You", "\u0120should", "'ve", "\u0120done", "\u0120this"],
      ids: [50257, 50362, 1639, 815, 1053, 1760, 428, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>You should've done this<|endoftext|>",
    },
    NUMBERS: {
      text: BASE_TEST_STRINGS.NUMBERS,
      tokens: ["01", "23", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"],
      ids: [50257, 50362, 486, 1954, 2231, 3134, 4531, 657, 352, 362, 513, 604, 642, 718, 767, 807, 860, 838, 1802, 8576, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000<|endoftext|>",
    },
    TEXT_WITH_NUMBERS: {
      text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS,
      tokens: ["The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u01202016", "."],
      ids: [50257, 50362, 464, 1664, 373, 9393, 287, 1584, 13, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>The company was founded in 2016.<|endoftext|>",
    },
    PUNCTUATION: {
      text: BASE_TEST_STRINGS.PUNCTUATION,
      tokens: ["A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."],
      ids: [50257, 50362, 32, 198, 1183, 37867, 1462, 8348, 67, 7061, 67, 286, 11, 460, 470, 13, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>A\n'll!!to?'d''d of, can't.<|endoftext|>",
    },
    PYTHON_CODE: {
      text: BASE_TEST_STRINGS.PYTHON_CODE,
      tokens: ["def", "\u0120main", "():", "\u010a", "\u0109", "pass"],
      ids: [50257, 50362, 4299, 1388, 33529, 198, 197, 6603, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>def main():\n\tpass<|endoftext|>",
    },
    JAVASCRIPT_CODE: {
      text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
      tokens: ["let", "\u0120a", "\u0120=", "\u0120obj", ".", "to", "String", "();", "\u010a", "to", "String", "();"],
      ids: [50257, 50362, 1616, 257, 796, 26181, 13, 1462, 10100, 9783, 198, 1462, 10100, 9783, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>let a = obj.toString();\ntoString();<|endoftext|>",
    },
    NEWLINES: {
      text: BASE_TEST_STRINGS.NEWLINES,
      tokens: ["This", "\u010a", "\u010a", "is", "\u010a", "a", "\u010a", "test", "."],
      ids: [50257, 50362, 1212, 198, 198, 271, 198, 64, 198, 9288, 13, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>This\n\nis\na\ntest.<|endoftext|>",
    },
    BASIC: {
      text: BASE_TEST_STRINGS.BASIC,
      tokens: ["UN", "want", "\u00c3\u00a9", "d", ",", "running"],
      ids: [50257, 50362, 4944, 42949, 2634, 67, 11, 20270, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>UNwant\u00e9d,running<|endoftext|>",
    },
    CONTROL_TOKENS: {
      text: BASE_TEST_STRINGS.CONTROL_TOKENS,
      tokens: ["1", "\u0100", "2", "\u00ef\u00bf\u00bd", "3"],
      ids: [50257, 50362, 16, 188, 17, 4210, 18, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>1\u00002\ufffd3<|endoftext|>",
    },
    HELLO_WORLD_TITLECASE: {
      text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE,
      tokens: ["Hello", "\u0120World"],
      ids: [50257, 50362, 15496, 2159, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>Hello World<|endoftext|>",
    },
    HELLO_WORLD_LOWERCASE: {
      text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE,
      tokens: ["hello", "\u0120world"],
      ids: [50257, 50362, 31373, 995, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>hello world<|endoftext|>",
    },
    CHINESE_ONLY: {
      text: BASE_TEST_STRINGS.CHINESE_ONLY,
      tokens: ["\u00e7\u0136\u0141", "\u00e6", "\u00b4", "\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e", "\u0141", "\u00e8", "\u00b0", "\u013d", "\u00e6\u013a\u00af"],
      ids: [50257, 50362, 37955, 162, 112, 119, 21410, 40367, 253, 164, 108, 249, 42468, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>\u751f\u6d3b\u7684\u771f\u8c1b\u662f<|endoftext|>",
    },
    LEADING_SPACE: {
      text: BASE_TEST_STRINGS.LEADING_SPACE,
      tokens: ["\u0120", "\u0120", "\u0120leading", "\u0120space"],
      ids: [50257, 50362, 220, 220, 3756, 2272, 50256],
      decoded: "<|startoftranscript|><|notimestamps|> leading space<|endoftext|>",
    },
    TRAILING_SPACE: {
      text: BASE_TEST_STRINGS.TRAILING_SPACE,
      tokens: ["tra", "iling", "\u0120space", "\u0120", "\u0120", "\u0120"],
      ids: [50257, 50362, 9535, 4386, 2272, 220, 220, 220, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>trailing space <|endoftext|>",
    },
    DOUBLE_SPACE: {
      text: BASE_TEST_STRINGS.DOUBLE_SPACE,
      tokens: ["Hi", "\u0120", "\u0120Hello"],
      ids: [50257, 50362, 17250, 220, 18435, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>Hi Hello<|endoftext|>",
    },
    CURRENCY: {
      text: BASE_TEST_STRINGS.CURRENCY,
      tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"],
      ids: [50257, 50362, 9288, 720, 16, 371, 17, 1303, 18, 10432, 19, 4248, 20, 38221, 21, 2343, 224, 96, 22, 2343, 224, 117, 23, 2343, 224, 109, 24, 1332, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test<|endoftext|>",
    },
    CURRENCY_WITH_DECIMALS: {
      text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS,
      tokens: ["I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", "\u0120the", "\u0120store", "."],
      ids: [50257, 50362, 40, 5839, 281, 17180, 329, 720, 16, 13, 405, 379, 262, 3650, 13, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>I bought an apple for $1.00 at the store.<|endoftext|>",
    },
    ELLIPSIS: {
      text: BASE_TEST_STRINGS.ELLIPSIS,
      tokens: ["you", "\u00e2\u0122\u00a6", "\u0120", "\u0120"],
      ids: [50257, 50362, 5832, 1399, 220, 220, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>you\u2026 <|endoftext|>",
    },
    TEXT_WITH_ESCAPE_CHARACTERS: {
      text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS,
      tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2\u0142\u00c2\u0142"],
      ids: [50257, 50362, 5832, 1399, 4603, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>you\u2026\u00a0\u00a0<|endoftext|>",
    },
    TEXT_WITH_ESCAPE_CHARACTERS_2: {
      text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2,
      tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2\u0142", "\u00c2\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2\u0142\u00c2\u0142"],
      ids: [50257, 50362, 5832, 1399, 1849, 1849, 5832, 1399, 4603, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0<|endoftext|>",
    },
    TILDE_NORMALIZATION: {
      text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
      tokens: ["we", "ird", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120case"],
      ids: [50257, 50362, 732, 1447, 27332, 121, 252, 5743, 27332, 121, 252, 1339, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>weird \uff5e edge \uff5e case<|endoftext|>",
    },
    SPIECE_UNDERSCORE: {
      text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE,
      tokens: ["\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138", "\u0123", "is", "\u0120\u00e2\u0138", "\u0123", "a", "\u0120\u00e2\u0138", "\u0123", "test", "\u0120\u00e2\u0138", "\u0123", "."],
      ids: [50257, 50362, 5008, 223, 1212, 11019, 223, 271, 11019, 223, 64, 11019, 223, 9288, 11019, 223, 13, 50256],
      decoded: "<|startoftranscript|><|notimestamps|>\u2581This \u2581is \u2581a \u2581test \u2581.<|endoftext|>",
    },
    SPECIAL_TOKENS: {
      text: WHISPER_TEST_STRINGS.SPECIAL_TOKENS,
      tokens: ["\u0120", "\u0120", "\u0120", "<|startoftranscript|>", "\u0120", "<|en|>", "\u0120", "\u0120", "\u0120"],
      ids: [50257, 50362, 220, 220, 220, 50257, 220, 50258, 220, 220, 220, 50256],
      decoded: "<|startoftranscript|><|notimestamps|> <|startoftranscript|> <|en|> <|endoftext|>",
    },
  },
  // Multilingual model with a different (BPE) vocabulary; only cases whose
  // expected output differs from the entries above are listed.
  "distil-whisper/distil-large-v3": {
    NUMBERS: {
      text: BASE_TEST_STRINGS.NUMBERS,
      tokens: ["0", "12", "3", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"],
      ids: [50258, 50364, 15, 4762, 18, 8465, 22452, 21115, 1958, 502, 568, 805, 1017, 1025, 1386, 1614, 1649, 1722, 1266, 2319, 9714, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000<|endoftext|>",
    },
    PYTHON_CODE: {
      text: BASE_TEST_STRINGS.PYTHON_CODE,
      tokens: ["def", "\u0120main", "(", "):", "\u010a", "\u0109", "pass"],
      ids: [50258, 50364, 20595, 2135, 7, 4507, 198, 197, 9216, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>def main():\n\tpass<|endoftext|>",
    },
    JAVASCRIPT_CODE: {
      text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
      tokens: ["let", "\u0120a", "\u0120=", "\u0120ob", "j", ".", "to", "St", "ring", "(", ");", "\u010a", "to", "St", "ring", "(", ");"],
      ids: [50258, 50364, 2631, 257, 6585, 1111, 73, 13, 1353, 4520, 2937, 7, 34446, 198, 1353, 4520, 2937, 7, 34446, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>let a = obj.toString();\ntoString();<|endoftext|>",
    },
    BASIC: {
      text: BASE_TEST_STRINGS.BASIC,
      tokens: ["UN", "w", "ant", "\u00c3\u00a9d", ",", "running"],
      ids: [50258, 50364, 3979, 86, 394, 7811, 11, 45482, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>UNwant\u00e9d,running<|endoftext|>",
    },
    HELLO_WORLD_LOWERCASE: {
      text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE,
      tokens: ["he", "llo", "\u0120world"],
      ids: [50258, 50364, 675, 1913, 1002, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>hello world<|endoftext|>",
    },
    CHINESE_ONLY: {
      text: BASE_TEST_STRINGS.CHINESE_ONLY,
      tokens: ["\u00e7\u0136\u0141\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"],
      ids: [50258, 50364, 49958, 1546, 6303, 8897, 249, 1541, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>\u751f\u6d3b\u7684\u771f\u8c1b\u662f<|endoftext|>",
    },
    CURRENCY: {
      text: BASE_TEST_STRINGS.CURRENCY,
      tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120test"],
      ids: [50258, 50364, 31636, 1848, 16, 497, 17, 3536, 18, 17450, 19, 14378, 20, 1815, 98, 21, 672, 224, 96, 22, 672, 224, 117, 23, 672, 224, 109, 24, 1500, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test<|endoftext|>",
    },
    TEXT_WITH_ESCAPE_CHARACTERS: {
      text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS,
      tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"],
      ids: [50258, 50364, 5616, 1260, 126, 254, 126, 254, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>you\u2026\u00a0\u00a0<|endoftext|>",
    },
    TEXT_WITH_ESCAPE_CHARACTERS_2: {
      text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2,
      tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"],
      ids: [50258, 50364, 5616, 1260, 126, 254, 126, 254, 5616, 1260, 126, 254, 126, 254, 50257],
      decoded: "<|startoftranscript|><|notimestamps|>you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0<|endoftext|>",
    },
  },
  // v2 prompt includes language/task tokens: <|startoftranscript|><|en|><|transcribe|><|notimestamps|>.
  "distil-whisper/distil-large-v2": {
    SIMPLE: {
      text: BASE_TEST_STRINGS.SIMPLE,
      tokens: ["How", "\u0120are", "\u0120you", "\u0120doing", "?"],
      ids: [50258, 50259, 50359, 50363, 6462, 366, 291, 884, 30, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>How are you doing?<|endoftext|>",
    },
    SIMPLE_WITH_PUNCTUATION: {
      text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION,
      tokens: ["You", "\u0120should", "'ve", "\u0120done", "\u0120", "this"],
      ids: [50258, 50259, 50359, 50363, 3223, 820, 600, 1096, 220, 11176, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>You should've done this<|endoftext|>",
    },
    NUMBERS: {
      text: BASE_TEST_STRINGS.NUMBERS,
      tokens: ["0", "12", "3", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"],
      ids: [50258, 50259, 50359, 50363, 15, 4762, 18, 8465, 22452, 21115, 1958, 502, 568, 805, 1017, 1025, 1386, 1614, 1649, 1722, 1266, 2319, 9714, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000<|endoftext|>",
    },
    TEXT_WITH_NUMBERS: {
      text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS,
      tokens: ["The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u01202016", "."],
      ids: [50258, 50259, 50359, 50363, 2278, 2237, 390, 13234, 294, 6549, 13, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>The company was founded in 2016.<|endoftext|>",
    },
    PUNCTUATION: {
      text: BASE_TEST_STRINGS.PUNCTUATION,
      tokens: ["A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."],
      ids: [50258, 50259, 50359, 50363, 32, 198, 603, 15138, 1353, 8569, 67, 15025, 67, 295, 11, 393, 380, 13, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>A\n'll!!to?'d''d of, can't.<|endoftext|>",
    },
    PYTHON_CODE: {
      text: BASE_TEST_STRINGS.PYTHON_CODE,
      tokens: ["def", "\u0120main", "(", "):", "\u010a", "\u0109", "pass"],
      ids: [50258, 50259, 50359, 50363, 20595, 2135, 7, 4507, 198, 197, 9216, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>def main():\n\tpass<|endoftext|>",
    },
    JAVASCRIPT_CODE: {
      text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
      tokens: ["let", "\u0120a", "\u0120=", "\u0120ob", "j", ".", "to", "St", "ring", "(", ");", "\u010a", "to", "St", "ring", "(", ");"],
      ids: [50258, 50259, 50359, 50363, 2631, 257, 6585, 1111, 73, 13, 1353, 4520, 2937, 7, 34446, 198, 1353, 4520, 2937, 7, 34446, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>let a = obj.toString();\ntoString();<|endoftext|>",
    },
    NEWLINES: {
      text: BASE_TEST_STRINGS.NEWLINES,
      tokens: ["This", "\u010a", "\u010a", "is", "\u010a", "a", "\u010a", "test", "."],
      ids: [50258, 50259, 50359, 50363, 5723, 198, 198, 271, 198, 64, 198, 31636, 13, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>This\n\nis\na\ntest.<|endoftext|>",
    },
    BASIC: {
      text: BASE_TEST_STRINGS.BASIC,
      tokens: ["UN", "w", "ant", "\u00c3\u00a9d", ",", "running"],
      ids: [50258, 50259, 50359, 50363, 3979, 86, 394, 7811, 11, 45482, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>UNwant\u00e9d,running<|endoftext|>",
    },
    CONTROL_TOKENS: {
      text: BASE_TEST_STRINGS.CONTROL_TOKENS,
      tokens: ["1", "\u0100", "2", "\u00ef\u00bf\u00bd", "3"],
      ids: [50258, 50259, 50359, 50363, 16, 188, 17, 5342, 18, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>1\u00002\ufffd3<|endoftext|>",
    },
    HELLO_WORLD_TITLECASE: {
      text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE,
      tokens: ["Hello", "\u0120World"],
      ids: [50258, 50259, 50359, 50363, 15947, 3937, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>Hello World<|endoftext|>",
    },
    HELLO_WORLD_LOWERCASE: {
      text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE,
      tokens: ["he", "llo", "\u0120world"],
      ids: [50258, 50259, 50359, 50363, 675, 1913, 1002, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>hello world<|endoftext|>",
    },
    CHINESE_ONLY: {
      text: BASE_TEST_STRINGS.CHINESE_ONLY,
      tokens: ["\u00e7\u0136\u0141\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"],
      ids: [50258, 50259, 50359, 50363, 49958, 1546, 6303, 8897, 249, 1541, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>\u751f\u6d3b\u7684\u771f\u8c1b\u662f<|endoftext|>",
    },
    LEADING_SPACE: {
      text: BASE_TEST_STRINGS.LEADING_SPACE,
      tokens: ["\u0120", "\u0120", "\u0120leading", "\u0120space"],
      ids: [50258, 50259, 50359, 50363, 220, 220, 5775, 1901, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> leading space<|endoftext|>",
    },
    TRAILING_SPACE: {
      text: BASE_TEST_STRINGS.TRAILING_SPACE,
      tokens: ["tra", "iling", "\u0120space", "\u0120", "\u0120", "\u0120"],
      ids: [50258, 50259, 50359, 50363, 17227, 4883, 1901, 220, 220, 220, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>trailing space <|endoftext|>",
    },
    DOUBLE_SPACE: {
      text: BASE_TEST_STRINGS.DOUBLE_SPACE,
      tokens: ["Hi", "\u0120", "\u0120Hello"],
      ids: [50258, 50259, 50359, 50363, 17155, 220, 2425, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>Hi Hello<|endoftext|>",
    },
    CURRENCY: {
      text: BASE_TEST_STRINGS.CURRENCY,
      tokens: ["test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120", "test"],
      ids: [50258, 50259, 50359, 50363, 31636, 1848, 16, 497, 17, 3536, 18, 17450, 19, 14378, 20, 1815, 98, 21, 672, 224, 96, 22, 672, 224, 117, 23, 672, 224, 109, 24, 220, 31636, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test<|endoftext|>",
    },
    CURRENCY_WITH_DECIMALS: {
      text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS,
      tokens: ["I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", "\u0120", "the", "\u0120store", "."],
      ids: [50258, 50259, 50359, 50363, 40, 4243, 364, 10606, 337, 1848, 16, 13, 628, 412, 220, 3322, 3531, 13, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>I bought an apple for $1.00 at the store.<|endoftext|>",
    },
    ELLIPSIS: {
      text: BASE_TEST_STRINGS.ELLIPSIS,
      tokens: ["you", "\u00e2\u0122\u00a6", "\u0120", "\u0120"],
      ids: [50258, 50259, 50359, 50363, 5616, 1260, 220, 220, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>you\u2026 <|endoftext|>",
    },
    TEXT_WITH_ESCAPE_CHARACTERS: {
      text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS,
      tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"],
      ids: [50258, 50259, 50359, 50363, 5616, 1260, 126, 254, 126, 254, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>you\u2026\u00a0\u00a0<|endoftext|>",
    },
    TEXT_WITH_ESCAPE_CHARACTERS_2: {
      text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2,
      tokens: ["you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"],
      ids: [50258, 50259, 50359, 50363, 5616, 1260, 126, 254, 126, 254, 5616, 1260, 126, 254, 126, 254, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0<|endoftext|>",
    },
    TILDE_NORMALIZATION: {
      text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
      tokens: ["we", "ird", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120case"],
      ids: [50258, 50259, 50359, 50363, 826, 1271, 25072, 121, 252, 4691, 25072, 121, 252, 1389, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>weird \uff5e edge \uff5e case<|endoftext|>",
    },
    SPIECE_UNDERSCORE: {
      text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE,
      tokens: ["\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138", "\u0123", "is", "\u0120\u00e2\u0138", "\u0123", "a", "\u0120\u00e2\u0138", "\u0123", "test", "\u0120\u00e2\u0138", "\u0123", "."],
      ids: [50258, 50259, 50359, 50363, 39984, 223, 5723, 29405, 223, 271, 29405, 223, 64, 29405, 223, 31636, 29405, 223, 13, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>\u2581This \u2581is \u2581a \u2581test \u2581.<|endoftext|>",
    },
    SPECIAL_TOKENS: {
      text: WHISPER_TEST_STRINGS.SPECIAL_TOKENS,
      tokens: ["\u0120", "\u0120", "\u0120", "<|startoftranscript|>", "\u0120", "<|en|>", "\u0120", "\u0120", "\u0120"],
      ids: [50258, 50259, 50359, 50363, 220, 220, 220, 50258, 220, 50259, 220, 220, 220, 50257],
      decoded: "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> <|startoftranscript|> <|en|> <|endoftext|>",
    },
  },
  // Only the special-token handling differs for this model.
  "distil-whisper/distil-small.en": {
    SPECIAL_TOKENS: {
      text: WHISPER_TEST_STRINGS.SPECIAL_TOKENS,
      // https://github.com/huggingface/transformers/issues/33371
      // tokens: [" <|startoftranscript|> ", "<|en|> "],
      tokens: ["<|startoftranscript|>", "<|en|>"],
      ids: [50257, 50362, 50257, 50258, 50256],
      decoded: "<|startoftranscript|><|notimestamps|><|startoftranscript|><|en|><|endoftext|>",
    },
  },
  // Norwegian model: prompt is <|startoftranscript|><|no|><|transcribe|><|notimestamps|>,
  // and tokenization prepends a space (note the leading \u0120 on the first token).
  "Xenova/nb-whisper-tiny-beta": {
    SIMPLE: {
      text: BASE_TEST_STRINGS.SIMPLE,
      tokens: ["\u0120How", "\u0120are", "\u0120you", "\u0120doing", "?"],
      ids: [50258, 50288, 50359, 50363, 1012, 366, 291, 884, 30, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> How are you doing?<|endoftext|>",
    },
    SIMPLE_WITH_PUNCTUATION: {
      text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION,
      tokens: ["\u0120You", "\u0120should", "'ve", "\u0120done", "\u0120", "this"],
      ids: [50258, 50288, 50359, 50363, 509, 820, 600, 1096, 220, 11176, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> You should've done this<|endoftext|>",
    },
    NUMBERS: {
      text: BASE_TEST_STRINGS.NUMBERS,
      tokens: ["\u01200", "12", "3", "45", "67", "89", "\u01200", "\u01201", "\u01202", "\u01203", "\u01204", "\u01205", "\u01206", "\u01207", "\u01208", "\u01209", "\u012010", "\u0120100", "\u01201000"],
      ids: [50258, 50288, 50359, 50363, 1958, 4762, 18, 8465, 22452, 21115, 1958, 502, 568, 805, 1017, 1025, 1386, 1614, 1649, 1722, 1266, 2319, 9714, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> 0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000<|endoftext|>",
    },
    TEXT_WITH_NUMBERS: {
      text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS,
      tokens: ["\u0120The", "\u0120company", "\u0120was", "\u0120founded", "\u0120in", "\u01202016", "."],
      ids: [50258, 50288, 50359, 50363, 440, 2237, 390, 13234, 294, 6549, 13, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> The company was founded in 2016.<|endoftext|>",
    },
    PUNCTUATION: {
      text: BASE_TEST_STRINGS.PUNCTUATION,
      tokens: ["\u0120A", "\u010a", "'ll", "\u0120!!", "to", "?'", "d", "''", "d", "\u0120of", ",", "\u0120can", "'t", "."],
      ids: [50258, 50288, 50359, 50363, 316, 198, 603, 15138, 1353, 8569, 67, 15025, 67, 295, 11, 393, 380, 13, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> A\n'll!!to?'d''d of, can't.<|endoftext|>",
    },
    PYTHON_CODE: {
      text: BASE_TEST_STRINGS.PYTHON_CODE,
      tokens: ["\u0120def", "\u0120main", "(", "):", "\u010a", "\u0109", "pass"],
      ids: [50258, 50288, 50359, 50363, 1060, 2135, 7, 4507, 198, 197, 9216, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> def main():\n\tpass<|endoftext|>",
    },
    JAVASCRIPT_CODE: {
      text: BASE_TEST_STRINGS.JAVASCRIPT_CODE,
      tokens: ["\u0120let", "\u0120a", "\u0120=", "\u0120ob", "j", ".", "to", "St", "ring", "(", ");", "\u010a", "to", "St", "ring", "(", ");"],
      ids: [50258, 50288, 50359, 50363, 718, 257, 6585, 1111, 73, 13, 1353, 4520, 2937, 7, 34446, 198, 1353, 4520, 2937, 7, 34446, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> let a = obj.toString();\ntoString();<|endoftext|>",
    },
    NEWLINES: {
      text: BASE_TEST_STRINGS.NEWLINES,
      tokens: ["\u0120This", "\u010a", "\u010a", "is", "\u010a", "a", "\u010a", "test", "."],
      ids: [50258, 50288, 50359, 50363, 639, 198, 198, 271, 198, 64, 198, 31636, 13, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> This\n\nis\na\ntest.<|endoftext|>",
    },
    BASIC: {
      text: BASE_TEST_STRINGS.BASIC,
      tokens: ["\u0120UN", "w", "ant", "\u00c3\u00a9d", ",", "running"],
      ids: [50258, 50288, 50359, 50363, 8229, 86, 394, 7811, 11, 45482, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> UNwant\u00e9d,running<|endoftext|>",
    },
    CONTROL_TOKENS: {
      text: BASE_TEST_STRINGS.CONTROL_TOKENS,
      tokens: ["\u01201", "\u0100", "2", "\u00ef\u00bf\u00bd", "3"],
      ids: [50258, 50288, 50359, 50363, 502, 188, 17, 5342, 18, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> 1\u00002\ufffd3<|endoftext|>",
    },
    HELLO_WORLD_TITLECASE: {
      text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE,
      tokens: ["\u0120Hello", "\u0120World"],
      ids: [50258, 50288, 50359, 50363, 2425, 3937, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> Hello World<|endoftext|>",
    },
    HELLO_WORLD_LOWERCASE: {
      text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE,
      tokens: ["\u0120hello", "\u0120world"],
      ids: [50258, 50288, 50359, 50363, 7751, 1002, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> hello world<|endoftext|>",
    },
    CHINESE_ONLY: {
      text: BASE_TEST_STRINGS.CHINESE_ONLY,
      tokens: ["\u0120", "\u00e7\u0136\u0141\u00e6\u00b4\u00bb", "\u00e7\u013c\u0126", "\u00e7\u013e\u0141", "\u00e8\u00b0", "\u013d", "\u00e6\u013a\u00af"],
      ids: [50258, 50288, 50359, 50363, 220, 49958, 1546, 6303, 8897, 249, 1541, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> \u751f\u6d3b\u7684\u771f\u8c1b\u662f<|endoftext|>",
    },
    LEADING_SPACE: {
      text: BASE_TEST_STRINGS.LEADING_SPACE,
      tokens: ["\u0120", "\u0120", "\u0120leading", "\u0120space"],
      ids: [50258, 50288, 50359, 50363, 220, 220, 5775, 1901, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> leading space<|endoftext|>",
    },
    TRAILING_SPACE: {
      text: BASE_TEST_STRINGS.TRAILING_SPACE,
      tokens: ["\u0120", "tra", "iling", "\u0120space", "\u0120", "\u0120", "\u0120"],
      ids: [50258, 50288, 50359, 50363, 220, 17227, 4883, 1901, 220, 220, 220, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> trailing space <|endoftext|>",
    },
    DOUBLE_SPACE: {
      text: BASE_TEST_STRINGS.DOUBLE_SPACE,
      tokens: ["\u0120Hi", "\u0120", "\u0120Hello"],
      ids: [50258, 50288, 50359, 50363, 2421, 220, 2425, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> Hi Hello<|endoftext|>",
    },
    CURRENCY: {
      text: BASE_TEST_STRINGS.CURRENCY,
      tokens: ["\u0120", "test", "\u0120$", "1", "\u0120R", "2", "\u0120#", "3", "\u0120\u00e2\u0124\u00ac", "4", "\u0120\u00c2\u00a3", "5", "\u0120\u00c2", "\u00a5", "6", "\u0120\u00e2", "\u0124", "\u00a3", "7", "\u0120\u00e2", "\u0124", "\u00b9", "8", "\u0120\u00e2", "\u0124", "\u00b1", "9", "\u0120", "test"],
      ids: [50258, 50288, 50359, 50363, 220, 31636, 1848, 16, 497, 17, 3536, 18, 17450, 19, 14378, 20, 1815, 98, 21, 672, 224, 96, 22, 672, 224, 117, 23, 672, 224, 109, 24, 220, 31636, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> test $1 R2 #3 \u20ac4 \u00a35 \u00a56 \u20a37 \u20b98 \u20b19 test<|endoftext|>",
    },
    CURRENCY_WITH_DECIMALS: {
      text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS,
      tokens: ["\u0120I", "\u0120bought", "\u0120an", "\u0120apple", "\u0120for", "\u0120$", "1", ".", "00", "\u0120at", "\u0120", "the", "\u0120store", "."],
      ids: [50258, 50288, 50359, 50363, 286, 4243, 364, 10606, 337, 1848, 16, 13, 628, 412, 220, 3322, 3531, 13, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> I bought an apple for $1.00 at the store.<|endoftext|>",
    },
    ELLIPSIS: {
      text: BASE_TEST_STRINGS.ELLIPSIS,
      tokens: ["\u0120you", "\u00e2\u0122\u00a6", "\u0120", "\u0120"],
      ids: [50258, 50288, 50359, 50363, 291, 1260, 220, 220, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> you\u2026 <|endoftext|>",
    },
    TEXT_WITH_ESCAPE_CHARACTERS: {
      text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS,
      tokens: ["\u0120you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"],
      ids: [50258, 50288, 50359, 50363, 291, 1260, 126, 254, 126, 254, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> you\u2026\u00a0\u00a0<|endoftext|>",
    },
    TEXT_WITH_ESCAPE_CHARACTERS_2: {
      text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2,
      tokens: ["\u0120you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142", "you", "\u00e2\u0122\u00a6", "\u00c2", "\u0142", "\u00c2", "\u0142"],
      ids: [50258, 50288, 50359, 50363, 291, 1260, 126, 254, 126, 254, 5616, 1260, 126, 254, 126, 254, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> you\u2026\u00a0\u00a0you\u2026\u00a0\u00a0<|endoftext|>",
    },
    TILDE_NORMALIZATION: {
      text: BASE_TEST_STRINGS.TILDE_NORMALIZATION,
      tokens: ["\u0120weird", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120edge", "\u0120\u00ef", "\u00bd", "\u0140", "\u0120case"],
      ids: [50258, 50288, 50359, 50363, 3657, 25072, 121, 252, 4691, 25072, 121, 252, 1389, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> weird \uff5e edge \uff5e case<|endoftext|>",
    },
    SPIECE_UNDERSCORE: {
      text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE,
      tokens: ["\u0120\u00e2\u0138", "\u0123", "This", "\u0120\u00e2\u0138", "\u0123", "is", "\u0120\u00e2\u0138", "\u0123", "a", "\u0120\u00e2\u0138", "\u0123", "test", "\u0120\u00e2\u0138", "\u0123", "."],
      ids: [50258, 50288, 50359, 50363, 29405, 223, 5723, 29405, 223, 271, 29405, 223, 64, 29405, 223, 31636, 29405, 223, 13, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> \u2581This \u2581is \u2581a \u2581test \u2581.<|endoftext|>",
    },
    SPECIAL_TOKENS: {
      text: WHISPER_TEST_STRINGS.SPECIAL_TOKENS,
      tokens: ["\u0120", "\u0120", "\u0120", "<|startoftranscript|>", "\u0120", "<|en|>", "\u0120", "\u0120", "\u0120"],
      ids: [50258, 50288, 50359, 50363, 220, 220, 220, 50258, 220, 50259, 220, 220, 220, 50257],
      decoded: "<|startoftranscript|><|no|><|transcribe|><|notimestamps|> <|startoftranscript|> <|en|> <|endoftext|>",
    },
  },
};

// Per-test timeout (ms) for the custom ASR decoding tests defined below.
const MAX_EXECUTION_TIME = 10_000;
export const
// Factory returning the custom test suite for WhisperTokenizer.
// The suite exercises `tokenizer._decode_asr`: merging token ids and per-token
// timestamps from several overlapping chunked model outputs into a single
// transcript with word-level timestamps.
CUSTOM_TESTS = () => {
  describe("Decode ASR", () => {
    it(
      "should decode ASR outputs",
      async () => {
        const tokenizer = await WhisperTokenizer.from_pretrained("onnx-community/whisper-tiny.en_timestamped");
        // Three chunked model outputs. Token ids are BigInt literals (int64 model
        // outputs); token_timestamps are seconds relative to the chunk start.
        // NOTE(review): `stride` is presumably [chunk_length, left_overlap,
        // right_overlap] in seconds, per the HF ASR chunking convention — confirm.
        const model_outputs = [
          {
            stride: [30, 0, 5],
            tokens: [50257n, 50362n, 8410n, 7283n, 0n, 2329n, 8410n, 7283n, 0n, 2094n, 470n, 1309n, 534n, 10625n, 307n, 10625n, 13n, 34668n, 345n, 531n, 9439n, 11n, 523n, 655n, 8410n, 7283n, 0n, 39134n, 16592n, 10625n, 0n, 9440n, 36n, 26751n, 0n, 25848n, 8410n, 7283n, 0n, 2773n, 661n, 4320n, 1943n, 981n, 345n, 821n, 8066n, 7765n, 510n, 290n, 670n, 1327n, 379n, 340n, 13n, 10528n, 318n, 5340n, 0n, 50256n],
            token_timestamps: [0, 0, 0, 3.78, 4.22, 5.34, 6.04, 6.56, 7, 7.92, 8.58, 8.58, 8.88, 9.14, 9.54, 9.94, 10.58, 11.38, 11.88, 12.42, 12.62, 13, 13.36, 13.64, 14.26, 14.76, 15.12, 15.4, 15.74, 16.12, 16.66, 17.14, 17.24, 17.24, 17.72, 18.38, 18.6, 19.38, 19.92, 22.66, 22.9, 23.24, 23.5, 24.14, 24.56, 24.7, 24.72, 24.94, 25.18, 25.54, 25.72, 26.02, 26.34, 26.44, 26.84, 27.04, 27.16, 27.54, 28.06, 29.92],
          },
          {
            stride: [30, 5, 5],
            tokens: [50257n, 50362n, 2773n, 661n, 4320n, 1943n, 981n, 345n, 821n, 8066n, 7765n, 510n, 290n, 670n, 1327n, 379n, 340n, 13n, 10528n, 318n, 5340n, 13n, 921n, 815n, 651n, 284n, 262n, 966n, 810n, 2687n, 2073n, 561n, 11238n, 290n, 345n, 821n, 407n, 8066n, 2245n, 612n, 13n, 1400n, 11n, 644n, 389n, 345n, 4953n, 329n, 30n, 2141n, 340n, 0n, 2329n, 466n, 340n, 0n, 3363n, 345n, 460n, 0n, 2329n, 466n, 340n, 0n, 50256n],
            token_timestamps: [0, 0, 0, 2.92, 3.24, 3.48, 4.14, 4.56, 4.7, 4.74, 4.92, 5.18, 5.54, 5.72, 6.04, 6.34, 6.46, 6.84, 7.04, 7.16, 7.54, 8.12, 10.16, 10.7, 10.9, 11.12, 11.24, 11.48, 11.84, 12.44, 12.82, 13.2, 13.46, 13.72, 14.06, 14.28, 14.34, 14.56, 14.8, 15.16, 15.9, 16.42, 16.82, 16.86, 17.02, 17.1, 17.22, 17.56, 18.06, 19.28, 19.62, 20.26, 21.96, 22.64, 24.28, 24.76, 25.18, 25.56, 25.78, 26.28, 27.12, 27.54, 27.82, 28.22, 29.48],
          },
          {
            // Final (shorter) chunk: no right-hand overlap.
            stride: [23.7728125, 5, 0],
            tokens: [50257n, 50362n, 2329n, 466n, 340n, 0n, 3363n, 345n, 460n, 0n, 2329n, 466n, 340n, 0n, 1002n, 345n, 821n, 10032n, 286n, 3599n, 625n, 11n, 2245n, 3501n, 510n, 13n, 50256n],
            token_timestamps: [0, 0, 0, 2.44, 4.3, 5.04, 5.06, 5.56, 5.8, 6.32, 7.12, 7.56, 7.8, 8.9, 10.92, 12.96, 13.28, 13.28, 13.44, 13.72, 13.96, 14.84, 15.5, 16.06, 16.86, 17.88, 20.92],
          },
        ];
        // Expected output shape: [full_text, { chunks: [{ text, timestamp }] }].
        // NOTE(review): the inline `/* x */` comments appear to record an
        // alternative raw timestamp (the next word's start) that the merge
        // replaced — confirm against _decode_asr's stitching logic.
        const target = [
          " DO IT! Just DO IT! Don't let your dreams be dreams. Yesterday you said tomorrow, so just DO IT! MAKE YOUR dreams! COME TRUE! JUST DO IT! Some people dream success while you're gonna wake up and work hard at it. Nothing is impossible. You should get to the point where anyone else would quit and you're not gonna stop there. No, what are you waiting for? Do it! Just do it! Yes you can! Just do it! If you're tired of starting over, stop giving up.",
          {
            chunks: [
              { text: " DO", timestamp: [0.0, 3.78] },
              { text: " IT!", timestamp: [3.78, 4.24 /* 5.34 */] },
              { text: " Just", timestamp: [5.34, 6.04] },
              { text: " DO", timestamp: [6.04, 6.56] },
              { text: " IT!", timestamp: [6.56, 7.02 /* 7.92 */] },
              { text: " Don't", timestamp: [7.92, 8.58] },
              { text: " let", timestamp: [8.58, 8.88] },
              { text: " your", timestamp: [8.88, 9.14] },
              { text: " dreams", timestamp: [9.14, 9.54] },
              { text: " be", timestamp: [9.54, 9.94] },
              { text: " dreams.", timestamp: [9.94, 10.6 /* 11.38 */] },
              { text: " Yesterday", timestamp: [11.38, 11.88] },
              { text: " you", timestamp: [11.88, 12.42] },
              { text: " said", timestamp: [12.42, 12.62] },
              { text: " tomorrow,", timestamp: [12.62, 13.02 /* 13.36 */] },
              { text: " so", timestamp: [13.36, 13.64] },
              { text: " just", timestamp: [13.64, 14.26] },
              { text: " DO", timestamp: [14.26, 14.76] },
              { text: " IT!", timestamp: [14.76, 15.14 /* 15.4 */] },
              { text: " MAKE", timestamp: [15.4, 15.74] },
              { text: " YOUR", timestamp: [15.74, 16.12] },
              { text: " dreams!", timestamp: [16.12, 16.68 /* 17.14 */] },
              { text: " COME", timestamp: [17.14, 17.24] },
              { text: " TRUE!", timestamp: [17.24, 17.74 /* 18.38 */] },
              { text: " JUST", timestamp: [18.38, 18.6] },
              { text: " DO", timestamp: [18.6, 19.38] },
              { text: " IT!", timestamp: [19.38, 19.94 /* 22.66 */] },
              { text: " Some", timestamp: [22.66, 22.9] },
              { text: " people", timestamp: [22.9, 23.24] },
              { text: " dream", timestamp: [23.24, 23.5] },
              { text: " success", timestamp: [23.5, 24.14] },
              { text: " while", timestamp: [24.14, 24.56] },
              { text: " you're", timestamp: [24.56, 24.72] },
              { text: " gonna", timestamp: [24.72, 24.94] },
              { text: " wake", timestamp: [24.94, 25.18] },
              { text: " up", timestamp: [25.18, 25.54] },
              { text: " and", timestamp: [25.54, 25.72] },
              { text: " work", timestamp: [25.72, 26.04] },
              { text: " hard", timestamp: [26.04, 26.34] },
              { text: " at", timestamp: [26.34, 26.46] },
              { text: " it.", timestamp: [26.46, 26.86 /* 27.04 */] },
              { text: " Nothing", timestamp: [27.04, 27.16] },
              { text: " is", timestamp: [27.16, 27.54] },
              { text: " impossible.", timestamp: [27.54, 28.14 /* 30.16 */] },
              { text: " You", timestamp: [30.16, 30.7] },
              { text: " should", timestamp: [30.7, 30.9] },
              { text: " get", timestamp: [30.9, 31.12] },
              { text: " to", timestamp: [31.12, 31.24] },
              { text: " the", timestamp: [31.24, 31.48] },
              { text: " point", timestamp: [31.48, 31.84] },
              { text: " where", timestamp: [31.84, 32.44] },
              { text: " anyone", timestamp: [32.44, 32.82] },
              { text: " else", timestamp: [32.82, 33.2] },
              { text: " would", timestamp: [33.2, 33.46] },
              { text: " quit", timestamp: [33.46, 33.72] },
              { text: " and", timestamp: [33.72, 34.06] },
              { text: " you're", timestamp: [34.06, 34.34] },
              { text: " not", timestamp: [34.34, 34.56] },
              { text: " gonna", timestamp: [34.56, 34.8] },
              { text: " stop", timestamp: [34.8, 35.16] },
              { text: " there.", timestamp: [35.16, 35.92 /* 36.42 */] },
              { text: " No,", timestamp: [36.42, 36.84 /* 36.86 */] },
              { text: " what", timestamp: [36.86, 37.02] },
              { text: " are", timestamp: [37.02, 37.1] },
              { text: " you", timestamp: [37.1, 37.22] },
              { text: " waiting", timestamp: [37.22, 37.56] },
              { text: " for?", timestamp: [37.56, 38.08 /* 39.28 */] },
              { text: " Do", timestamp: [39.28, 39.62] },
              { text: " it!", timestamp: [39.62, 40.28 /* 41.96 */] },
              { text: " Just", timestamp: [41.96, 42.64] },
              { text: " do", timestamp: [42.64, 44.28] },
              { text: " it!", timestamp: [44.28, 44.78 /* 45.18 */] },
              { text: " Yes", timestamp: [45.18, 45.56] },
              { text: " you", timestamp: [45.56, 45.78] },
              { text: " can!", timestamp: [45.8, 46.34 /* 47.12 */] },
              { text: " Just", timestamp: [47.12, 47.56] },
              { text: " do", timestamp: [47.56, 47.8] },
              { text: " it!", timestamp: [47.8, 48.92 /* 50.92 */] },
              { text: " If", timestamp: [50.92, 52.96] },
              { text: " you're", timestamp: [52.96, 53.28] },
              { text: " tired", timestamp: [53.28, 53.44] },
              { text: " of", timestamp: [53.44, 53.72] },
              { text: " starting", timestamp: [53.72, 53.96] },
              { text: " over,", timestamp: [53.96, 54.86 /* 55.5 */] },
              { text: " stop", timestamp: [55.5, 56.06] },
              { text: " giving", timestamp: [56.06, 56.86] },
              { text: " up.", timestamp: [56.86, 57.9 /* 60.92 */] },
            ],
          },
        ];
        compare(
          tokenizer._decode_asr(model_outputs, {
            return_timestamps: "word",
            time_precision: 0.02,
            force_full_sequences: false,
          }),
          target,
          1e-2, // numeric tolerance for timestamp comparison
        );
      },
      MAX_EXECUTION_TIME,
    );
    // Same pipeline but with chunk boundaries that produce conflicting word
    // timings in the overlap regions, checking the stitching tie-breaks.
    it(
      "should handle overlapping edge case",
      async () => {
        const tokenizer = await WhisperTokenizer.from_pretrained("onnx-community/whisper-tiny.en_timestamped");
        const model_outputs = [
          {
            stride: [30, 0, 5],
            tokens: [50257n, 50362n, 8410n, 7283n, 0n, 2329n, 8410n, 7283n, 0n, 2094n, 470n, 1309n, 534n, 10625n, 307n, 10625n, 13n, 34668n, 11n, 345n, 531n, 9439n, 11n, 523n, 655n, 8410n, 7283n, 0n, 39134n, 16592n, 10560n, 3955n, 50n, 0n, 7102n, 5446n, 46n, 0n, 25848n, 8410n, 7283n, 0n, 2773n, 661n, 4320n, 1943n, 981n, 345n, 821n, 8066n, 7765n, 510n, 290n, 670n, 1327n, 379n, 340n, 13n, 10528n, 318n, 5340n, 13n, 50256n],
            token_timestamps: [0, 0, 0, 3.78, 4.22, 5.26, 6.04, 6.54, 7, 7.94, 8.58, 8.58, 8.88, 9.16, 9.54, 9.94, 10.6, 11.38, 11.88, 12.38, 12.44, 12.62, 13, 13.36, 13.64, 14.24, 14.74, 15.12, 15.4, 15.74, 16.1, 16.54, 16.54, 16.78, 17.08, 17.2, 17.36, 17.56, 18.08, 18.58, 19.38, 19.88, 22.54, 22.9, 23.24, 23.5, 24.14, 24.56, 24.7, 24.94, 24.94, 25.18, 25.54, 25.72, 26.04, 26.34, 26.46, 26.84, 27.04, 27.14, 27.54, 28.06, 29.92],
          },
          {
            stride: [30, 5, 5],
            tokens: [50257n, 50362n, 2773n, 661n, 4320n, 1943n, 981n, 345n, 821n, 8066n, 7765n, 510n, 290n, 670n, 1327n, 379n, 340n, 13n, 10528n, 318n, 5340n, 13n, 921n, 815n, 651n, 284n, 262n, 966n, 810n, 2687n, 2073n, 561n, 11238n, 290n, 345n, 821n, 407n, 8066n, 2245n, 612n, 13n, 1400n, 11n, 644n, 389n, 345n, 4953n, 329n, 30n, 2141n, 340n, 0n, 2329n, 466n, 340n, 0n, 3363n, 11n, 345n, 460n, 0n, 2329n, 466n, 340n, 0n, 50256n],
            token_timestamps: [0, 0, 0, 2.92, 3.24, 3.5, 4.14, 4.56, 4.7, 4.74, 4.92, 5.18, 5.54, 5.74, 6.04, 6.34, 6.46, 6.84, 7.04, 7.18, 7.56, 8.12, 9.68, 10.7, 10.88, 11.1, 11.24, 11.48, 11.82, 12.46, 12.82, 13.2, 13.46, 13.72, 14.08, 14.28, 14.34, 14.56, 14.82, 15.16, 15.72, 16.42, 16.82, 16.86, 17, 17.1, 17.2, 17.56, 18.06, 19.28, 19.6, 20.28, 21.96, 22.64, 24.28, 24.76, 25.18, 25.56, 25.56, 25.84, 26.36, 27.12, 27.54, 27.82, 28.16, 29.48],
          },
          {
            stride: [23.7728125, 5, 0],
            tokens: [50257n, 50362n, 2329n, 466n, 340n, 0n, 3363n, 345n, 460n, 0n, 2329n, 466n, 340n, 0n, 1002n, 534n, 15867n, 318n, 3599n, 625n, 11n, 2245n, 3501n, 510n, 13n, 50256n],
            token_timestamps: [0, 0, 0, 2.44, 4.3, 5.04, 5.06, 5.56, 5.8, 6.32, 7.12, 7.56, 7.8, 8.72, 10.04, 12.96, 13.3, 13.44, 13.72, 13.98, 14.86, 15.5, 16, 16.88, 17.76, 20.9],
          },
        ];
        const target = [
          " DO IT! Just DO IT! Don't let your dreams be dreams. Yesterday, you said tomorrow, so just DO IT! MAKE YOUR DRIMS! CONTRO! JUST DO IT! Some people dream success while you're gonna wake up and work hard at it. Nothing is impossible. You should get to the point where anyone else would quit and you're not gonna stop there. No, what are you waiting for? Do it! Just do it! Yes, you can! Just do it! If your tire is starting over, stop giving up.",
          {
            chunks: [
              { text: " DO", timestamp: [0, 3.78] },
              { text: " IT!", timestamp: [3.78, 4.24] },
              { text: " Just", timestamp: [5.26, 6.04] },
              { text: " DO", timestamp: [6.04, 6.54] },
              { text: " IT!", timestamp: [6.54, 7.02] },
              { text: " Don't", timestamp: [7.94, 8.58] },
              { text: " let", timestamp: [8.58, 8.88] },
              { text: " your", timestamp: [8.88, 9.16] },
              { text: " dreams", timestamp: [9.16, 9.54] },
              { text: " be", timestamp: [9.54, 9.94] },
              { text: " dreams.", timestamp: [9.94, 10.62] },
              { text: " Yesterday,", timestamp: [11.38, 11.9] },
              { text: " you", timestamp: [12.38, 12.44] },
              { text: " said", timestamp: [12.44, 12.62] },
              { text: " tomorrow,", timestamp: [12.62, 13.02] },
              { text: " so", timestamp: [13.36, 13.64] },
              { text: " just", timestamp: [13.64, 14.24] },
              { text: " DO", timestamp: [14.24, 14.74] },
              { text: " IT!", timestamp: [14.74, 15.14] },
              { text: " MAKE", timestamp: [15.4, 15.74] },
              { text: " YOUR", timestamp: [15.74, 16.1] },
              { text: " DRIMS!", timestamp: [16.1, 16.8] },
              { text: " CONTRO!", timestamp: [17.08, 17.58] },
              { text: " JUST", timestamp: [18.08, 18.58] },
              { text: " DO", timestamp: [18.58, 19.38] },
              { text: " IT!", timestamp: [19.38, 19.9] },
              { text: " Some", timestamp: [22.54, 22.9] },
              { text: " people", timestamp: [22.9, 23.24] },
              { text: " dream", timestamp: [23.24, 23.5] },
              { text: " success", timestamp: [23.5, 24.14] },
              { text: " while", timestamp: [24.14, 24.56] },
              { text: " you're", timestamp: [24.56, 24.94] },
              { text: " gonna", timestamp: [24.94, 24.94] },
              { text: " wake", timestamp: [24.94, 25.18] },
              { text: " up", timestamp: [25.18, 25.54] },
              { text: " and", timestamp: [25.54, 25.74] },
              { text: " work", timestamp: [25.74, 26.04] },
              { text: " hard", timestamp: [26.04, 26.34] },
              { text: " at", timestamp: [26.34, 26.46] },
              { text: " it.", timestamp: [26.46, 26.86] },
              { text: " Nothing", timestamp: [27.04, 27.18] },
              { text: " is", timestamp: [27.18, 27.56] },
              { text: " impossible.", timestamp: [27.56, 28.14] },
              { text: " You", timestamp: [29.68, 30.7] },
              { text: " should", timestamp: [30.7, 30.88] },
              { text: " get", timestamp: [30.88, 31.1] },
              { text: " to", timestamp: [31.1, 31.24] },
              { text: " the", timestamp: [31.24, 31.48] },
              { text: " point", timestamp: [31.48, 31.82] },
              { text: " where", timestamp: [31.82, 32.46] },
              { text: " anyone", timestamp: [32.46, 32.82] },
              { text: " else", timestamp: [32.82, 33.2] },
              { text: " would", timestamp: [33.2, 33.46] },
              { text: " quit", timestamp: [33.46, 33.72] },
              { text: " and", timestamp: [33.72, 34.08] },
              { text: " you're", timestamp: [34.08, 34.34] },
              { text: " not", timestamp: [34.34, 34.56] },
              { text: " gonna", timestamp: [34.56, 34.82] },
              { text: " stop", timestamp: [34.82, 35.16] },
              { text: " there.", timestamp: [35.16, 35.74] },
              { text: " No,", timestamp: [36.42, 36.84] },
              { text: " what", timestamp: [36.86, 37] },
              { text: " are", timestamp: [37, 37.1] },
              { text: " you", timestamp: [37.1, 37.2] },
              { text: " waiting", timestamp: [37.2, 37.56] },
              { text: " for?", timestamp: [37.56, 38.08] },
              { text: " Do", timestamp: [39.28, 39.6] },
              { text: " it!", timestamp: [39.6, 40.3] },
              { text: " Just", timestamp: [41.96, 42.64] },
              { text: " do", timestamp: [42.64, 44.28] },
              { text: " it!", timestamp: [44.28, 44.78] },
              { text: " Yes,", timestamp: [45.18, 45.56] },
              { text: " you", timestamp: [45.56, 45.84] },
              { text: " can!", timestamp: [45.8, 46.34] },
              { text: " Just", timestamp: [47.12, 47.56] },
              { text: " do", timestamp: [47.56, 47.8] },
              { text: " it!", timestamp: [47.8, 48.74] },
              { text: " If", timestamp: [50.04, 52.96] },
              { text: " your", timestamp: [52.96, 53.3] },
              { text: " tire", timestamp: [53.3, 53.44] },
              { text: " is", timestamp: [53.44, 53.72] },
              { text: " starting", timestamp: [53.72, 53.98] },
              { text: " over,", timestamp: [53.98, 54.88] },
              { text: " stop", timestamp: [55.5, 56] },
              { text: " giving", timestamp: [56, 56.88] },
              { text: " up.", timestamp: [56.88, 57.78] },
            ],
          },
        ];
        compare(
          tokenizer._decode_asr(model_outputs, {
            return_timestamps: "word",
            time_precision: 0.02,
            force_full_sequences: false,
          }),
          target,
          1e-2, // numeric tolerance for timestamp comparison
        );
      },
      MAX_EXECUTION_TIME,
    );
  });
};
transformers.js/tests/models/whisper/test_tokenization_whisper.js/0
{ "file_path": "transformers.js/tests/models/whisper/test_tokenization_whisper.js", "repo_id": "transformers.js", "token_count": 25252 }
356
// Test suite for the "object-detection" pipeline: one real (tiny) YOLOS model
// with image fixtures, and one random-weight DETR model for fast shape/score
// regression checks.
import { pipeline, ObjectDetectionPipeline } from "../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";

import { load_cached_image } from "../asset_cache.js";

const PIPELINE_ID = "object-detection";

export default () => {
  describe("Object Detection", () => {
    describe("yolos", () => {
      const model_id = "Xenova/yolos-tiny";

      /** @type {ObjectDetectionPipeline} */
      let pipe;
      // Model download/load is the slow part, so it gets its own timeout.
      beforeAll(async () => {
        pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
      }, MAX_MODEL_LOAD_TIME);

      it("should be an instance of ObjectDetectionPipeline", () => {
        expect(pipe).toBeInstanceOf(ObjectDetectionPipeline);
      });

      it(
        "single + threshold",
        async () => {
          const image = await load_cached_image("cats");
          const output = await pipe(image, { threshold: 0.9 });

          // Expected detections (scores and pixel-space boxes), compared with
          // 5-decimal tolerance via toBeCloseToNested.
          const target = [
            {
              score: 0.9921281933784485,
              label: "remote",
              box: { xmin: 32, ymin: 78, xmax: 185, ymax: 117 },
            },
            {
              score: 0.9884883165359497,
              label: "remote",
              box: { xmin: 324, ymin: 82, xmax: 376, ymax: 191 },
            },
            {
              score: 0.9197800159454346,
              label: "cat",
              box: { xmin: 5, ymin: 56, xmax: 321, ymax: 469 },
            },
            {
              score: 0.9300552606582642,
              label: "cat",
              box: { xmin: 332, ymin: 25, xmax: 638, ymax: 369 },
            },
          ];
          expect(output).toBeCloseToNested(target, 5);
        },
        MAX_TEST_EXECUTION_TIME,
      );

      afterAll(async () => {
        await pipe.dispose();
      }, MAX_MODEL_DISPOSE_TIME);
    });

    describe("tiny-random", () => {
      const model_id = "hf-internal-testing/tiny-random-DetrForObjectDetection";

      /** @type {ObjectDetectionPipeline} */
      let pipe;
      let images;

      beforeAll(async () => {
        pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
        images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]);
      }, MAX_MODEL_LOAD_TIME);

      it("should be an instance of ObjectDetectionPipeline", () => {
        expect(pipe).toBeInstanceOf(ObjectDetectionPipeline);
      });

      describe("batch_size=1", () => {
        it(
          "default (threshold unset)",
          async () => {
            // Random weights produce only low-confidence detections, so the
            // default threshold filters everything out.
            const output = await pipe(images[0]);
            const target = [];
            expect(output).toBeCloseToNested(target, 5);
          },
          MAX_TEST_EXECUTION_TIME,
        );
        it(
          "default (threshold=0)",
          async () => {
            // threshold=0 keeps every query's detection; scores/boxes are
            // regression values pinned against the random-weight model.
            const output = await pipe(images[0], { threshold: 0 });
            const target = [
              { score: 0.020360443741083145, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360419526696205, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.02036038413643837, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360447466373444, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360389724373817, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360423251986504, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.02036040835082531, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360363647341728, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360389724373817, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360389724373817, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360343158245087, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
              { score: 0.020360423251986504, label: "LABEL_31", box: { xmin: 56, ymin: 55, xmax: 169, ymax: 167 } },
            ];
            expect(output).toBeCloseToNested(target, 5);
          },
          MAX_TEST_EXECUTION_TIME,
        );
      });

      // TODO: Add batched support to object detection pipeline
      // describe('batch_size>1', () => {
      //   it('default (threshold unset)', async () => {
      //     const output = await pipe(images);
      //     console.log(output);
      //     const target = [];
      //     expect(output).toBeCloseToNested(target, 5);
      //   }, MAX_TEST_EXECUTION_TIME);
      //   it('default (threshold=0)', async () => {
      //     const output = await pipe(images, { threshold: 0 });
      //     console.log(output);
      //     const target = [];
      //     expect(output).toBeCloseToNested(target, 5);
      //   }, MAX_TEST_EXECUTION_TIME);
      // });

      afterAll(async () => {
        await pipe.dispose();
      }, MAX_MODEL_DISPOSE_TIME);
    });
  });
};
transformers.js/tests/pipelines/test_pipelines_object_detection.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_object_detection.js", "repo_id": "transformers.js", "token_count": 2625 }
357
// Unit tests for the shared utility data structures: PriorityQueue (binary
// heap with optional comparator and max size), DictionarySplitter (splits a
// string on dictionary entries), and LRUCache (capacity-bounded cache).
import { PriorityQueue, DictionarySplitter, LRUCache } from "../../src/utils/data-structures.js";

describe("Priority queue", () => {
  const EXAMPLE_ARRAY = [2, 5, 3, 1, 4];

  it("default (max heap)", () => {
    // Without a comparator the queue behaves as a max-heap.
    const heap = new PriorityQueue();
    heap.extend(EXAMPLE_ARRAY);
    expect(heap.pop()).toBe(5);
  });

  it("min heap", () => {
    // A custom comparator flips the ordering to a min-heap.
    const heap = new PriorityQueue((a, b) => a < b);
    heap.extend(EXAMPLE_ARRAY);
    expect(heap.pop()).toBe(1);
  });

  it("heap w/ max size", () => {
    // A bounded heap must still surface the maximum despite trimming.
    const heap = new PriorityQueue((a, b) => a > b, 3);
    heap.extend([1, 2, 3, 4, 5, 4, 3, 2, 1]);
    expect(heap.pop()).toBe(5);

    // Test with random sizes
    const sizes = [1, 3, 4, 5, 8, 9, 15, 16, 31, 32, 127, 128];
    const samples = Array.from({ length: 100 }, (_) => Math.random());
    const largest = Math.max(...samples);

    sizes.forEach((capacity) => {
      const bounded = new PriorityQueue((a, b) => a > b, capacity);
      bounded.extend(samples);
      expect(bounded.pop()).toBe(largest);
      expect(bounded.size).toBeLessThanOrEqual(capacity);
    });
  });
});

describe("Dictionary splitter", () => {
  it("should split on a defined dictionary", () => {
    const splitter = new DictionarySplitter(["a", "b", "c", "abc"]);
    const text = ".a.b.cc.abcdef.";
    const expected = [".", "a", ".", "b", ".", "c", "c", ".", "abc", "def."];
    expect(splitter.split(text)).toEqual(expected);
  });

  it("should handle multi-byte characters", () => {
    // Includes a lone surrogate ("\ud83e") alongside the full emoji.
    const text = "before🤗after\ud83etest";
    const splitter = new DictionarySplitter(["🤗" /* '\ud83e\udd17' */, "\ud83e"]);
    const expected = ["before", "🤗", "after", "\ud83e", "test"];
    expect(splitter.split(text)).toEqual(expected);
  });
});

describe("LRUCache", () => {
  it("should return undefined for non-existent keys", () => {
    const lru = new LRUCache(2);
    expect(lru.get("nonexistent")).toEqual(undefined);
  });

  it("should store and retrieve values correctly", () => {
    const lru = new LRUCache(2);
    lru.put("a", 1);
    lru.put("b", 2);
    expect(lru.get("a")).toEqual(1);
    expect(lru.get("b")).toEqual(2);
  });

  it("should update the value and refresh the usage", () => {
    const lru = new LRUCache(2);
    lru.put("a", 1);
    lru.put("b", 2);
    // Update key "a"
    lru.put("a", 10);
    expect(lru.get("a")).toEqual(10);
    // Access "a" so "b" becomes the LRU
    lru.get("a");
    lru.put("c", 3);
    // "b" should be evicted since it is the least recently used.
    expect(lru.get("b")).toEqual(undefined);
    expect(lru.get("c")).toEqual(3);
  });

  it("should evict the least recently used item when capacity is exceeded", () => {
    const lru = new LRUCache(3);
    lru.put("a", 1);
    lru.put("b", 2);
    lru.put("c", 3);
    // Access "a" to refresh its recentness.
    lru.get("a");
    // Insert a new key, this should evict "b" as it is the least recently used.
    lru.put("d", 4);
    expect(lru.get("b")).toEqual(undefined);
    expect(lru.get("a")).toEqual(1);
    expect(lru.get("c")).toEqual(3);
    expect(lru.get("d")).toEqual(4);
  });

  it("should update the usage order on get", () => {
    const lru = new LRUCache(3);
    lru.put("a", "apple");
    lru.put("b", "banana");
    lru.put("c", "cherry");
    // Access "a" making it most recently used.
    expect(lru.get("a")).toEqual("apple");
    // Insert new element to evict the least recently used ("b").
    lru.put("d", "date");
    expect(lru.get("b")).toEqual(undefined);
    // "a", "c", and "d" should be present.
    expect(lru.get("a")).toEqual("apple");
    expect(lru.get("c")).toEqual("cherry");
    expect(lru.get("d")).toEqual("date");
  });

  it("should clear the cache", () => {
    const lru = new LRUCache(2);
    lru.put("a", 1);
    lru.put("b", 2);
    lru.clear();
    expect(lru.get("a")).toEqual(undefined);
    expect(lru.get("b")).toEqual(undefined);
  });
});
358
# Security Policy

## Hugging Face Hub, remote artefacts, and remote code

Transformers is open-source software that is tightly coupled to the Hugging Face Hub. While you have the ability to use it offline with pre-downloaded model weights, it provides a very simple way to download, use, and manage models locally.

When downloading artefacts that have been uploaded by others on any platform, you expose yourself to risks. Please read below for the security recommendations in order to keep your runtime and local environment safe.

### Remote artefacts

Models uploaded on the Hugging Face Hub come in different formats. We heavily recommend uploading and downloading models in the [`safetensors`](https://github.com/huggingface/safetensors) format (which is the default prioritized by the transformers library), as it was developed specifically to prevent arbitrary code execution on your system.

To avoid loading models from unsafe formats (e.g. [pickle](https://docs.python.org/3/library/pickle.html)), you should use the `use_safetensors` parameter. If doing so, in the event that no .safetensors file is present, transformers will error when loading the model.

### Remote code

#### Modeling

Transformers supports many model architectures, but is also the bridge between your Python runtime and models that are stored in model repositories on the Hugging Face Hub.

These models require the `trust_remote_code=True` parameter to be set when using them; please **always** verify the content of the modeling files when using this argument. We recommend setting a revision in order to ensure you protect yourself from updates on the repository.

## Reporting a Vulnerability

Feel free to submit vulnerability reports to [security@huggingface.co](mailto:security@huggingface.co), where someone from the HF security team will review and recommend next steps.
If reporting a vulnerability specific to open source, please note [Huntr](https://huntr.com) is a vulnerability disclosure program for open source software.
transformers/SECURITY.md/0
{ "file_path": "transformers/SECURITY.md", "repo_id": "transformers", "token_count": 461 }
359