text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
""" Pytorch Inception-Resnet-V2 implementation Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) """ from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_classifier, ConvNormAct from ._builder import build_model_with_cfg from ._manipulate import flatten_modules from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['InceptionResnetV2'] class Mixed_5b(nn.Module): def __init__(self, conv_block=None): super(Mixed_5b, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(192, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(192, 48, kernel_size=1, stride=1), conv_block(48, 64, kernel_size=5, stride=1, padding=2) ) self.branch2 = nn.Sequential( conv_block(192, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1), conv_block(96, 96, kernel_size=3, stride=1, padding=1) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(192, 64, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block35(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block35, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 32, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 32, kernel_size=3, stride=1, padding=1) ) self.branch2 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 48, kernel_size=3, stride=1, padding=1), conv_block(48, 64, kernel_size=3, stride=1, padding=1) ) self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class Mixed_6a(nn.Module): def __init__(self, conv_block=None): super(Mixed_6a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential( conv_block(320, 256, kernel_size=1, stride=1), conv_block(256, 256, kernel_size=3, stride=1, padding=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Block17(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block17, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(1088, 128, kernel_size=1, stride=1), conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) ) self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class Mixed_7a(nn.Module): def __init__(self, conv_block=None): super(Mixed_7a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch1 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=2) ) self.branch2 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=1, padding=1), conv_block(288, 320, kernel_size=3, stride=2) ) self.branch3 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block8(nn.Module): def __init__(self, scale=1.0, no_relu=False, conv_block=None): super(Block8, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(2080, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) ) self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) self.relu = None if no_relu else nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x if self.relu is not None: out = self.relu(out) return out class InceptionResnetV2(nn.Module): def __init__( self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg', norm_layer='batchnorm2d', norm_eps=1e-3, act_layer='relu', ): super(InceptionResnetV2, self).__init__() self.num_classes = num_classes self.num_features = self.head_hidden_size = 1536 assert output_stride == 32 conv_block = partial( ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True), ) self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2) self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1) self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1) self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] self.maxpool_3a = nn.MaxPool2d(3, stride=2) self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1) self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1) self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] self.maxpool_5a = nn.MaxPool2d(3, stride=2) self.mixed_5b = Mixed_5b(conv_block=conv_block) self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)]) self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] self.mixed_6a = Mixed_6a(conv_block=conv_block) self.repeat_1 = nn.Sequential(*[Block17(scale=0.10, conv_block=conv_block) for _ in range(20)]) self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] self.mixed_7a = Mixed_7a(conv_block=conv_block) self.repeat_2 = nn.Sequential(*[Block8(scale=0.20, conv_block=conv_block) for _ in range(9)]) self.block8 = Block8(no_relu=True, conv_block=conv_block) self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1) self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] self.global_pool, self.head_drop, self.classif = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('classif',)) def _matcher(name): if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]): return 1 elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]): return len(module_map) + 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "checkpointing not supported" @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.classif def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv2d_1a(x) x = self.conv2d_2a(x) x = self.conv2d_2b(x) x = self.maxpool_3a(x) x = self.conv2d_3b(x) x = self.conv2d_4a(x) x = self.maxpool_5a(x) x = self.mixed_5b(x) x = self.repeat(x) x = self.mixed_6a(x) x = self.repeat_1(x) x = self.mixed_7a(x) x = self.repeat_2(x) x = self.block8(x) x = self.conv2d_7b(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classif(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs) default_cfgs = generate_default_cfgs({ # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz 'inception_resnet_v2.tf_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', }, # As per https://arxiv.org/abs/1705.07204 and # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz 'inception_resnet_v2.tf_ens_adv_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', } }) @register_model def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2: return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, { 'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k', })
pytorch-image-models/timm/models/inception_resnet_v2.py/0
{ "file_path": "pytorch-image-models/timm/models/inception_resnet_v2.py", "repo_id": "pytorch-image-models", "token_count": 6025 }
266
""" Next-ViT As described in https://arxiv.org/abs/2207.05501 Next-ViT model defs and weights adapted from https://github.com/bytedance/Next-ViT, original copyright below """ # Copyright (c) ByteDance Inc. All rights reserved. from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, ConvMlp, get_norm_layer, get_act_layer, use_fused_attn from timm.layers import ClassifierHead from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['NextViT'] def merge_pre_bn(module, pre_bn_1, pre_bn_2=None): """ Merge pre BN to reduce inference runtime. """ weight = module.weight.data if module.bias is None: zeros = torch.zeros(module.out_chs, device=weight.device).type(weight.type()) module.bias = nn.Parameter(zeros) bias = module.bias.data if pre_bn_2 is None: assert pre_bn_1.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" assert pre_bn_1.affine is True, "Unsupported bn_module.affine is False" scale_invstd = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5) extra_weight = scale_invstd * pre_bn_1.weight extra_bias = pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd else: assert pre_bn_1.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" assert pre_bn_1.affine is True, "Unsupported bn_module.affine is False" assert pre_bn_2.track_running_stats is True, "Unsupported bn_module.track_running_stats is False" assert pre_bn_2.affine is True, "Unsupported bn_module.affine is False" scale_invstd_1 = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5) scale_invstd_2 = pre_bn_2.running_var.add(pre_bn_2.eps).pow(-0.5) extra_weight = scale_invstd_1 * pre_bn_1.weight * scale_invstd_2 * pre_bn_2.weight extra_bias = ( scale_invstd_2 * pre_bn_2.weight * (pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd_1 - pre_bn_2.running_mean) + pre_bn_2.bias ) if isinstance(module, nn.Linear): extra_bias = weight @ extra_bias weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight)) elif isinstance(module, nn.Conv2d): assert weight.shape[2] == 1 and weight.shape[3] == 1 weight = weight.reshape(weight.shape[0], weight.shape[1]) extra_bias = weight @ extra_bias weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight)) weight = weight.reshape(weight.shape[0], weight.shape[1], 1, 1) bias.add_(extra_bias) module.weight.data = weight module.bias.data = bias class ConvNormAct(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=3, stride=1, groups=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, ): super(ConvNormAct, self).__init__() self.conv = nn.Conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=1, groups=groups, bias=False) self.norm = norm_layer(out_chs) self.act = act_layer() def forward(self, x): x = self.conv(x) x = self.norm(x) x = self.act(x) return x def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v class PatchEmbed(nn.Module): def __init__(self, in_chs, out_chs, stride=1, norm_layer = nn.BatchNorm2d, ): super(PatchEmbed, self).__init__() if stride == 2: self.pool = nn.AvgPool2d((2, 2), stride=2, ceil_mode=True, count_include_pad=False) self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False) self.norm = norm_layer(out_chs) elif in_chs != out_chs: self.pool = nn.Identity() self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False) self.norm = norm_layer(out_chs) else: self.pool = nn.Identity() self.conv = nn.Identity() self.norm = nn.Identity() def forward(self, x): return self.norm(self.conv(self.pool(x))) class ConvAttention(nn.Module): """ Multi-Head Convolutional Attention """ def __init__(self, out_chs, head_dim, norm_layer = nn.BatchNorm2d, act_layer = nn.ReLU): super(ConvAttention, self).__init__() self.group_conv3x3 = nn.Conv2d( out_chs, out_chs, kernel_size=3, stride=1, padding=1, groups=out_chs // head_dim, bias=False ) self.norm = norm_layer(out_chs) self.act = act_layer() self.projection = nn.Conv2d(out_chs, out_chs, kernel_size=1, bias=False) def forward(self, x): out = self.group_conv3x3(x) out = self.norm(out) out = self.act(out) out = self.projection(out) return out class NextConvBlock(nn.Module): """ Next Convolution Block """ def __init__( self, in_chs, out_chs, stride=1, drop_path=0., drop=0., head_dim=32, mlp_ratio=3., norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU ): super(NextConvBlock, self).__init__() self.in_chs = in_chs self.out_chs = out_chs assert out_chs % head_dim == 0 self.patch_embed = PatchEmbed(in_chs, out_chs, stride, norm_layer=norm_layer) self.mhca = ConvAttention( out_chs, head_dim, norm_layer=norm_layer, act_layer=act_layer, ) self.attn_drop_path = DropPath(drop_path) self.norm = norm_layer(out_chs) self.mlp = ConvMlp( out_chs, hidden_features=int(out_chs * mlp_ratio), drop=drop, bias=True, act_layer=act_layer, ) self.mlp_drop_path = DropPath(drop_path) self.is_fused = False @torch.no_grad() def reparameterize(self): if not self.is_fused: merge_pre_bn(self.mlp.fc1, self.norm) self.norm = nn.Identity() self.is_fused = True def forward(self, x): x = self.patch_embed(x) x = x + self.attn_drop_path(self.mhca(x)) out = self.norm(x) x = x + self.mlp_drop_path(self.mlp(out)) return x class EfficientAttention(nn.Module): """ Efficient Multi-Head Self Attention """ fused_attn: torch.jit.Final[bool] def __init__( self, dim, out_dim=None, head_dim=32, qkv_bias=True, attn_drop=0., proj_drop=0., sr_ratio=1, norm_layer=nn.BatchNorm1d, ): super().__init__() self.dim = dim self.out_dim = out_dim if out_dim is not None else dim self.num_heads = self.dim // head_dim self.head_dim = head_dim self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, self.dim, bias=qkv_bias) self.k = nn.Linear(dim, self.dim, bias=qkv_bias) self.v = nn.Linear(dim, self.dim, bias=qkv_bias) self.proj = nn.Linear(self.dim, self.out_dim) self.attn_drop = nn.Dropout(attn_drop) self.proj_drop = nn.Dropout(proj_drop) self.sr_ratio = sr_ratio self.N_ratio = sr_ratio ** 2 if sr_ratio > 1: self.sr = nn.AvgPool1d(kernel_size=self.N_ratio, stride=self.N_ratio) self.norm = norm_layer(dim) else: self.sr = None self.norm = None def forward(self, x): B, N, C = x.shape q = self.q(x).reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3) if self.sr is not None: x = self.sr(x.transpose(1, 2)) x = self.norm(x).transpose(1, 2) k = self.k(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2) v = self.v(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-1, -2) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class NextTransformerBlock(nn.Module): """ Next Transformer Block """ def __init__( self, in_chs, out_chs, drop_path, stride=1, sr_ratio=1, mlp_ratio=2, head_dim=32, mix_block_ratio=0.75, attn_drop=0., drop=0., norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, ): super(NextTransformerBlock, self).__init__() self.in_chs = in_chs self.out_chs = out_chs self.mix_block_ratio = mix_block_ratio self.mhsa_out_chs = _make_divisible(int(out_chs * mix_block_ratio), 32) self.mhca_out_chs = out_chs - self.mhsa_out_chs self.patch_embed = PatchEmbed(in_chs, self.mhsa_out_chs, stride) self.norm1 = norm_layer(self.mhsa_out_chs) self.e_mhsa = EfficientAttention( self.mhsa_out_chs, head_dim=head_dim, sr_ratio=sr_ratio, attn_drop=attn_drop, proj_drop=drop, ) self.mhsa_drop_path = DropPath(drop_path * mix_block_ratio) self.projection = PatchEmbed(self.mhsa_out_chs, self.mhca_out_chs, stride=1, norm_layer=norm_layer) self.mhca = ConvAttention( self.mhca_out_chs, head_dim=head_dim, norm_layer=norm_layer, act_layer=act_layer, ) self.mhca_drop_path = DropPath(drop_path * (1 - mix_block_ratio)) self.norm2 = norm_layer(out_chs) self.mlp = ConvMlp( out_chs, hidden_features=int(out_chs * mlp_ratio), act_layer=act_layer, drop=drop, ) self.mlp_drop_path = DropPath(drop_path) self.is_fused = False @torch.no_grad() def reparameterize(self): if not self.is_fused: merge_pre_bn(self.e_mhsa.q, self.norm1) if self.e_mhsa.norm is not None: merge_pre_bn(self.e_mhsa.k, self.norm1, self.e_mhsa.norm) merge_pre_bn(self.e_mhsa.v, self.norm1, self.e_mhsa.norm) self.e_mhsa.norm = nn.Identity() else: merge_pre_bn(self.e_mhsa.k, self.norm1) merge_pre_bn(self.e_mhsa.v, self.norm1) self.norm1 = nn.Identity() merge_pre_bn(self.mlp.fc1, self.norm2) self.norm2 = nn.Identity() self.is_fused = True def forward(self, x): x = self.patch_embed(x) B, C, H, W = x.shape out = self.norm1(x) out = out.reshape(B, C, -1).transpose(-1, -2) out = self.mhsa_drop_path(self.e_mhsa(out)) x = x + out.transpose(-1, -2).reshape(B, C, H, W) out = self.projection(x) out = out + self.mhca_drop_path(self.mhca(out)) x = torch.cat([x, out], dim=1) out = self.norm2(x) x = x + self.mlp_drop_path(self.mlp(out)) return x class NextStage(nn.Module): def __init__( self, in_chs, block_chs, block_types, stride=2, sr_ratio=1, mix_block_ratio=1.0, drop=0., attn_drop=0., drop_path=0., head_dim=32, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, ): super().__init__() self.grad_checkpointing = False blocks = [] for block_idx, block_type in enumerate(block_types): stride = stride if block_idx == 0 else 1 out_chs = block_chs[block_idx] block_type = block_types[block_idx] dpr = drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path if block_type is NextConvBlock: layer = NextConvBlock( in_chs, out_chs, stride=stride, drop_path=dpr, drop=drop, head_dim=head_dim, norm_layer=norm_layer, act_layer=act_layer, ) blocks.append(layer) elif block_type is NextTransformerBlock: layer = NextTransformerBlock( in_chs, out_chs, drop_path=dpr, stride=stride, sr_ratio=sr_ratio, head_dim=head_dim, mix_block_ratio=mix_block_ratio, attn_drop=attn_drop, drop=drop, norm_layer=norm_layer, act_layer=act_layer, ) blocks.append(layer) in_chs = out_chs self.blocks = nn.Sequential(*blocks) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class NextViT(nn.Module): def __init__( self, in_chans, num_classes=1000, global_pool='avg', stem_chs=(64, 32, 64), depths=(3, 4, 10, 3), strides=(1, 2, 2, 2), sr_ratios=(8, 4, 2, 1), drop_path_rate=0.1, attn_drop_rate=0., drop_rate=0., head_dim=32, mix_block_ratio=0.75, norm_layer=nn.BatchNorm2d, act_layer=None, ): super(NextViT, self).__init__() self.grad_checkpointing = False self.num_classes = num_classes norm_layer = get_norm_layer(norm_layer) if act_layer is None: act_layer = partial(nn.ReLU, inplace=True) else: act_layer = get_act_layer(act_layer) self.stage_out_chs = [ [96] * (depths[0]), [192] * (depths[1] - 1) + [256], [384, 384, 384, 384, 512] * (depths[2] // 5), [768] * (depths[3] - 1) + [1024] ] self.feature_info = [dict( num_chs=sc[-1], reduction=2**(i + 2), module=f'stages.{i}' ) for i, sc in enumerate(self.stage_out_chs)] # Next Hybrid Strategy self.stage_block_types = [ [NextConvBlock] * depths[0], [NextConvBlock] * (depths[1] - 1) + [NextTransformerBlock], [NextConvBlock, NextConvBlock, NextConvBlock, NextConvBlock, NextTransformerBlock] * (depths[2] // 5), [NextConvBlock] * (depths[3] - 1) + [NextTransformerBlock]] self.stem = nn.Sequential( ConvNormAct(in_chans, stem_chs[0], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[0], stem_chs[1], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[1], stem_chs[2], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer), ConvNormAct(stem_chs[2], stem_chs[2], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer), ) in_chs = out_chs = stem_chs[-1] stages = [] idx = 0 dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] for stage_idx in range(len(depths)): stage = NextStage( in_chs=in_chs, block_chs=self.stage_out_chs[stage_idx], block_types=self.stage_block_types[stage_idx], stride=strides[stage_idx], sr_ratio=sr_ratios[stage_idx], mix_block_ratio=mix_block_ratio, head_dim=head_dim, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[stage_idx], norm_layer=norm_layer, act_layer=act_layer, ) in_chs = out_chs = self.stage_out_chs[stage_idx][-1] stages += [stage] idx += depths[stage_idx] self.num_features = self.head_hidden_size = out_chs self.stages = nn.Sequential(*stages) self.norm = norm_layer(out_chs) self.head = ClassifierHead(pool_type=global_pool, in_features=out_chs, num_classes=num_classes) self.stage_out_idx = [sum(depths[:idx + 1]) - 1 for idx in range(len(depths))] self._initialize_weights() def _initialize_weights(self): for n, m in self.named_modules(): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): trunc_normal_(m.weight, std=.02) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', # stem and embed blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm', (99999,)), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for stage in self.stages: stage.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.stem(x) last_idx = len(self.stages) - 1 if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(stage, x) else: x = stage(x) if feat_idx in take_indices: if feat_idx == last_idx: x_inter = self.norm(x) if norm else x intermediates.append(x_inter) else: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == last_idx: x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'head.fc.weight' in state_dict: return state_dict # non-original D = model.state_dict() out_dict = {} # remap originals based on order for ka, kb, va, vb in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()): out_dict[ka] = vb return out_dict def _create_nextvit(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( NextViT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'nextvit_small.bd_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_base.bd_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_large.bd_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_small.bd_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_base.bd_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_large.bd_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_small.bd_ssld_6m_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_base.bd_ssld_6m_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_large.bd_ssld_6m_in1k': _cfg( hf_hub_id='timm/', ), 'nextvit_small.bd_ssld_6m_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_base.bd_ssld_6m_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'nextvit_large.bd_ssld_6m_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), }) @register_model def nextvit_small(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 10, 3), drop_path_rate=0.1) model = _create_nextvit( 'nextvit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def nextvit_base(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 20, 3), drop_path_rate=0.2) model = _create_nextvit( 'nextvit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def nextvit_large(pretrained=False, **kwargs): model_args = dict(depths=(3, 4, 30, 3), drop_path_rate=0.2) model = _create_nextvit( 'nextvit_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/nextvit.py/0
{ "file_path": "pytorch-image-models/timm/models/nextvit.py", "repo_id": "pytorch-image-models", "token_count": 13453 }
267
""" SEResNet implementation from Cadene's pretrained models https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py Additional credit to https://github.com/creafz Original model: https://github.com/hujie-frank/SENet ResNet code gently borrowed from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. """ import math from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SENet'] def _weight_init(m): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1.) nn.init.constant_(m.bias, 0.) class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = x.mean((2, 3), keepdim=True) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class Bottleneck(nn.Module): """ Base class for bottlenecks that implements `forward()` method. """ def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SEBottleneck(Bottleneck): """ Bottleneck for SENet154. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes * 2) self.conv2 = nn.Conv2d( planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes * 4) self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBottleneck(Bottleneck): """ ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe implementation and uses `stride=stride` in `conv1` and not in `conv2` (the latter is used in the torchvision implementation of ResNet). """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNeXtBottleneck(Bottleneck): """ ResNeXt bottleneck type C with a Squeeze-and-Excitation module. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): super(SEResNeXtBottleneck, self).__init__() width = math.floor(planes * (base_width / 64)) * groups self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) self.bn1 = nn.BatchNorm2d(width) self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(width) self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes, reduction=reduction) self.downsample = downsample self.stride = stride def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SENet(nn.Module): def __init__( self, block, layers, groups, reduction, drop_rate=0.2, in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=1000, global_pool='avg'): """ Parameters ---------- block (nn.Module): Bottleneck class. - For SENet154: SEBottleneck - For SE-ResNet models: SEResNetBottleneck - For SE-ResNeXt models: SEResNeXtBottleneck layers (list of ints): Number of residual blocks for 4 layers of the network (layer1...layer4). groups (int): Number of groups for the 3x3 convolution in each bottleneck block. - For SENet154: 64 - For SE-ResNet models: 1 - For SE-ResNeXt models: 32 reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - For all models: 16 dropout_p (float or None): Drop probability for the Dropout layer. If `None` the Dropout layer is not used. - For SENet154: 0.2 - For SE-ResNet models: None - For SE-ResNeXt models: None inplanes (int): Number of input channels for layer1. - For SENet154: 128 - For SE-ResNet models: 64 - For SE-ResNeXt models: 64 input_3x3 (bool): If `True`, use three 3x3 convolutions instead of a single 7x7 convolution in layer0. - For SENet154: True - For SE-ResNet models: False - For SE-ResNeXt models: False downsample_kernel_size (int): Kernel size for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 3 - For SE-ResNet models: 1 - For SE-ResNeXt models: 1 downsample_padding (int): Padding for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 1 - For SE-ResNet models: 0 - For SE-ResNeXt models: 0 num_classes (int): Number of outputs in `last_linear` layer. - For all models: 1000 """ super(SENet, self).__init__() self.inplanes = inplanes self.num_classes = num_classes self.drop_rate = drop_rate if input_3x3: layer0_modules = [ ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), ('bn1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), ('bn2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)), ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), ('bn3', nn.BatchNorm2d(inplanes)), ('relu3', nn.ReLU(inplace=True)), ] else: layer0_modules = [ ('conv1', nn.Conv2d( in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', nn.BatchNorm2d(inplanes)), ('relu1', nn.ReLU(inplace=True)), ] self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] self.layer1 = self._make_layer( block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0 ) self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] self.layer2 = self._make_layer( block, planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] self.layer3 = self._make_layer( block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] self.layer4 = self._make_layer( block, planes=512, blocks=layers[3], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] self.num_features = self.head_hidden_size = 512 * block.expansion self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) for m in self.modules(): _weight_init(m) def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, groups, reduction)) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^layer0', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.last_linear def reset_classifier(self, num_classes: int, global_pool: str = 'avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.layer0(x) x = self.pool0(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_senet(variant, pretrained=False, **kwargs): return build_model_with_cfg(SENet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', **kwargs } default_cfgs = generate_default_cfgs({ 'legacy_senet154.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_senet154-e9eb9fe6.pth'), 'legacy_seresnet18.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', interpolation='bicubic'), 'legacy_seresnet34.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), 'legacy_seresnet50.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), 'legacy_seresnet101.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), 'legacy_seresnet152.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), 'legacy_seresnext26_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', interpolation='bicubic'), 'legacy_seresnext50_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext50_32x4d-f3651bad.pth'), 'legacy_seresnext101_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext101_32x4d-37725eac.pth'), }) @register_model def legacy_seresnet18(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16) return _create_senet('legacy_seresnet18', pretrained, **dict(model_args, **kwargs)) @register_model def legacy_seresnet34(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16) return _create_senet('legacy_seresnet34', pretrained, **dict(model_args, **kwargs)) @register_model def legacy_seresnet50(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16) return _create_senet('legacy_seresnet50', pretrained, **dict(model_args, **kwargs)) @register_model def legacy_seresnet101(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16) return _create_senet('legacy_seresnet101', pretrained, **dict(model_args, **kwargs)) @register_model def legacy_seresnet152(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16) return _create_senet('legacy_seresnet152', pretrained, **dict(model_args, **kwargs)) @register_model def legacy_senet154(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True) return _create_senet('legacy_senet154', pretrained, **dict(model_args, **kwargs)) @register_model def legacy_seresnext26_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16) return _create_senet('legacy_seresnext26_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def legacy_seresnext50_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16) return _create_senet('legacy_seresnext50_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def legacy_seresnext101_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16) return _create_senet('legacy_seresnext101_32x4d', pretrained, **dict(model_args, **kwargs))
pytorch-image-models/timm/models/senet.py/0
{ "file_path": "pytorch-image-models/timm/models/senet.py", "repo_id": "pytorch-image-models", "token_count": 8381 }
268
""" Hybrid Vision Transformer (ViT) in PyTorch A PyTorch implement of the Hybrid Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https://arxiv.org/abs/2106.10270 NOTE These hybrid model definitions depend on code in vision_transformer.py. They were moved here to keep file sizes sane. Hacked together by / Copyright 2020, Ross Wightman """ from functools import partial from typing import Dict, Tuple, Type, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import StdConv2dSame, StdConv2d, ConvNormAct, to_ntuple, HybridEmbed from ._builder import build_model_with_cfg from ._registry import generate_default_cfgs, register_model, register_model_deprecations from .resnet import resnet26d, resnet50d from .resnetv2 import ResNetV2, create_resnetv2_stem from .vision_transformer import VisionTransformer class ConvStem(nn.Sequential): def __init__( self, in_chans: int = 3, depth: int = 3, channels: Union[int, Tuple[int, ...]] = 64, kernel_size: Union[int, Tuple[int, ...]] = 3, stride: Union[int, Tuple[int, ...]] = (2, 2, 2), padding: Union[str, int, Tuple[int, ...]] = "", norm_layer: Type[nn.Module] = nn.BatchNorm2d, act_layer: Type[nn.Module] = nn.ReLU, ): super().__init__() if isinstance(channels, int): # a default tiered channel strategy channels = tuple([channels // 2**i for i in range(depth)][::-1]) kernel_size = to_ntuple(depth)(kernel_size) padding = to_ntuple(depth)(padding) assert depth == len(stride) == len(kernel_size) == len(channels) in_chs = in_chans for i in range(len(channels)): last_conv = i == len(channels) - 1 self.add_module(f'{i}', ConvNormAct( in_chs, channels[i], kernel_size=kernel_size[i], stride=stride[i], padding=padding[i], bias=last_conv, apply_norm=not last_conv, apply_act=not last_conv, norm_layer=norm_layer, act_layer=act_layer, )) in_chs = channels[i] def _resnetv2(layers=(3, 4, 9), **kwargs): """ ResNet-V2 backbone helper""" padding_same = kwargs.get('padding_same', True) stem_type = 'same' if padding_same else '' conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) if len(layers): backbone = ResNetV2( layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), preact=False, stem_type=stem_type, conv_layer=conv_layer) else: backbone = create_resnetv2_stem( kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) return backbone def _convert_mobileclip(state_dict, model, prefix='image_encoder.model.'): out = {} for k, v in state_dict.items(): if not k.startswith(prefix): continue k = k.replace(prefix, '') k = k.replace('patch_emb.', 'patch_embed.backbone.') k = k.replace('block.conv', 'conv') k = k.replace('block.norm', 'bn') k = k.replace('post_transformer_norm.', 'norm.') k = k.replace('pre_norm_mha.0', 'norm1') k = k.replace('pre_norm_mha.1', 'attn') k = k.replace('pre_norm_ffn.0', 'norm2') k = k.replace('pre_norm_ffn.1', 'mlp.fc1') k = k.replace('pre_norm_ffn.4', 'mlp.fc2') k = k.replace('qkv_proj.', 'qkv.') k = k.replace('out_proj.', 'proj.') k = k.replace('transformer.', 'blocks.') if k == 'pos_embed.pos_embed.pos_embed': k = 'pos_embed' v = v.squeeze(0) if 'classifier.proj' in k: bias_k = k.replace('classifier.proj', 'head.bias') k = k.replace('classifier.proj', 'head.weight') v = v.T out[bias_k] = torch.zeros(v.shape[0]) out[k] = v return out def checkpoint_filter_fn( state_dict: Dict[str, torch.Tensor], model: VisionTransformer, interpolation: str = 'bicubic', antialias: bool = True, ) -> Dict[str, torch.Tensor]: from .vision_transformer import checkpoint_filter_fn as _filter_fn if 'image_encoder.model.patch_emb.0.block.conv.weight' in state_dict: state_dict = _convert_mobileclip(state_dict, model) return _filter_fn(state_dict, model, interpolation=interpolation, antialias=antialias) def _create_vision_transformer_hybrid(variant, backbone, embed_args=None, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) embed_args = embed_args or {} embed_layer = partial(HybridEmbed, backbone=backbone, **embed_args) kwargs.setdefault('embed_layer', embed_layer) kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set return build_model_with_cfg( VisionTransformer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # hybrid in-1k models (weights from official JAX impl where they exist) 'vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True, first_conv='patch_embed.backbone.conv'), 'vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_small_r26_s32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True, ), 'vit_small_r26_s32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_base_r26_s32_224.untrained': _cfg(), 'vit_base_r50_s16_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_r50_s32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True, ), 'vit_large_r50_s32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True, ), # hybrid in-21k models (weights from official Google JAX impl where they exist) 'vit_tiny_r_s16_p8_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv', custom_load=True), 'vit_small_r26_s32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, custom_load=True), 'vit_base_r50_s16_224.orig_in21k': _cfg( #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', hf_hub_id='timm/', num_classes=0, crop_pct=0.9), 'vit_large_r50_s32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, custom_load=True), # hybrid models (using timm resnet backbones) 'vit_small_resnet26d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_small_resnet50d_s16_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_resnet26d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_resnet50d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_mci_224.apple_mclip_lt': _cfg( hf_hub_id='apple/mobileclip_b_lt_timm', url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_blt.pt', num_classes=512, mean=(0., 0., 0.), std=(1., 1., 1.), first_conv='patch_embed.backbone.0.conv', ), 'vit_base_mci_224.apple_mclip': _cfg( hf_hub_id='apple/mobileclip_b_timm', url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_b.pt', num_classes=512, mean=(0., 0., 0.), std=(1., 1., 1.), first_conv='patch_embed.backbone.0.conv', ), }) @register_model def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs) -> VisionTransformer: """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. """ backbone = _resnetv2(layers=(), **kwargs) model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer_hybrid( 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs) -> VisionTransformer: """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. """ backbone = _resnetv2(layers=(), **kwargs) model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer_hybrid( 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-S/S32 hybrid. """ backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer_hybrid( 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_r26_s32_384(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-S/S32 hybrid. """ backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer_hybrid( 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-B/S32 hybrid. """ backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r50_s16_224(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). """ backbone = _resnetv2((3, 4, 9), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r50_s16_384(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ backbone = _resnetv2((3, 4, 9), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_r50_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-L/S32 hybrid. """ backbone = _resnetv2((3, 4, 6, 3), **kwargs) model_args = dict(embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer_hybrid( 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_r50_s32_384(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-L/S32 hybrid. """ backbone = _resnetv2((3, 4, 6, 3), **kwargs) model_args = dict(embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer_hybrid( 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. """ backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) model = _create_vision_transformer_hybrid( 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_resnet50d_s16_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. """ backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) model = _create_vision_transformer_hybrid( 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. """ backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_resnet50d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. """ backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_mci_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. """ backbone = ConvStem( channels=(768//4, 768//4, 768), stride=(4, 2, 2), kernel_size=(4, 2, 2), padding=0, in_chans=kwargs.get('in_chans', 3), act_layer=nn.GELU, ) model_args = dict(embed_dim=768, depth=12, num_heads=12, no_embed_class=True) model = _create_vision_transformer_hybrid( 'vit_base_mci_224', backbone=backbone, embed_args=dict(proj=False), pretrained=pretrained, **dict(model_args, **kwargs) ) return model register_model_deprecations(__name__, { 'vit_tiny_r_s16_p8_224_in21k': 'vit_tiny_r_s16_p8_224.augreg_in21k', 'vit_small_r26_s32_224_in21k': 'vit_small_r26_s32_224.augreg_in21k', 'vit_base_r50_s16_224_in21k': 'vit_base_r50_s16_224.orig_in21k', 'vit_base_resnet50_224_in21k': 'vit_base_r50_s16_224.orig_in21k', 'vit_large_r50_s32_224_in21k': 'vit_large_r50_s32_224.augreg_in21k', 'vit_base_resnet50_384': 'vit_base_r50_s16_384.orig_in21k_ft_in1k' })
pytorch-image-models/timm/models/vision_transformer_hybrid.py/0
{ "file_path": "pytorch-image-models/timm/models/vision_transformer_hybrid.py", "repo_id": "pytorch-image-models", "token_count": 8260 }
269
""" AdaHessian Optimizer Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py Originally licensed MIT, Copyright 2020, David Samuel """ import torch class Adahessian(torch.optim.Optimizer): """ Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 0.1) betas ((float, float), optional): coefficients used for computing running averages of gradient and the squared hessian trace (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0) hessian_power (float, optional): exponent of the hessian trace (default: 1.0) update_each (int, optional): compute the hessian trace approximation only after *this* number of steps (to save time) (default: 1) n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1) """ def __init__( self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False, ): if not 0.0 <= lr: raise ValueError(f"Invalid learning rate: {lr}") if not 0.0 <= eps: raise ValueError(f"Invalid epsilon value: {eps}") if not 0.0 <= betas[0] < 1.0: raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") if not 0.0 <= betas[1] < 1.0: raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") if not 0.0 <= hessian_power <= 1.0: raise ValueError(f"Invalid Hessian power value: {hessian_power}") self.n_samples = n_samples self.update_each = update_each self.avg_conv_kernel = avg_conv_kernel # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training self.seed = 2147483647 self.generator = torch.Generator().manual_seed(self.seed) defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power, ) super(Adahessian, self).__init__(params, defaults) for p in self.get_params(): p.hess = 0.0 self.state[p]["hessian step"] = 0 @property def is_second_order(self): return True def get_params(self): """ Gets all parameters in all param_groups with gradients """ return (p for group in self.param_groups for p in group['params'] if p.requires_grad) def zero_hessian(self): """ Zeros out the accumulated hessian traces. """ for p in self.get_params(): if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0: p.hess.zero_() @torch.no_grad() def set_hessian(self): """ Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter. """ params = [] for p in filter(lambda p: p.grad is not None, self.get_params()): if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step params.append(p) self.state[p]["hessian step"] += 1 if len(params) == 0: return if self.generator.device != params[0].device: # hackish way of casting the generator to the right device self.generator = torch.Generator(params[0].device).manual_seed(self.seed) grads = [p.grad for p in params] for i in range(self.n_samples): # Rademacher distribution {-1.0, 1.0} zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] h_zs = torch.autograd.grad( grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) for h_z, z, p in zip(h_zs, zs, params): p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z) @torch.no_grad() def step(self, closure=None): """ Performs a single optimization step. Arguments: closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None) """ loss = None if closure is not None: loss = closure() self.zero_hessian() self.set_hessian() for group in self.param_groups: for p in group['params']: if p.grad is None or p.hess is None: continue if self.avg_conv_kernel and p.dim() == 4: p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() # Perform correct stepweight decay as in AdamW p.mul_(1 - group['lr'] * group['weight_decay']) state = self.state[p] # State initialization if len(state) == 1: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of Hessian diagonal square values state['exp_hessian_diag_sq'] = torch.zeros_like(p) exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] k = group['hessian_power'] denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) # make update step_size = group['lr'] / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) return loss
pytorch-image-models/timm/optim/adahessian.py/0
{ "file_path": "pytorch-image-models/timm/optim/adahessian.py", "repo_id": "pytorch-image-models", "token_count": 3131 }
270
# lots of uses of these functions directly, ala 'import timm.optim.optim_factory as optim_factory', fun :/ from ._optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs from ._param_groups import param_groups_layer_decay, param_groups_weight_decay, group_parameters, _layer_map, _group import warnings warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.optim", FutureWarning)
pytorch-image-models/timm/optim/optim_factory.py/0
{ "file_path": "pytorch-image-models/timm/optim/optim_factory.py", "repo_id": "pytorch-image-models", "token_count": 130 }
271
""" Adaptive Gradient Clipping An impl of AGC, as per (https://arxiv.org/abs/2102.06171): @article{brock2021high, author={Andrew Brock and Soham De and Samuel L. Smith and Karen Simonyan}, title={High-Performance Large-Scale Image Recognition Without Normalization}, journal={arXiv preprint arXiv:}, year={2021} } Code references: * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c Hacked together by / Copyright 2021 Ross Wightman """ import torch def unitwise_norm(x, norm_type=2.0): if x.ndim <= 1: return x.norm(norm_type) else: # works for nn.ConvNd and nn,Linear where output dim is first in the kernel/weight tensor # might need special cases for other weights (possibly MHA) where this may not be true return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True) def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0): if isinstance(parameters, torch.Tensor): parameters = [parameters] for p in parameters: if p.grad is None: continue p_data = p.detach() g_data = p.grad.detach() max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor) grad_norm = unitwise_norm(g_data, norm_type=norm_type) clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6)) new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad) p.grad.detach().copy_(new_grads)
pytorch-image-models/timm/utils/agc.py/0
{ "file_path": "pytorch-image-models/timm/utils/agc.py", "repo_id": "pytorch-image-models", "token_count": 661 }
272
__version__ = '1.0.20.dev0'
pytorch-image-models/timm/version.py/0
{ "file_path": "pytorch-image-models/timm/version.py", "repo_id": "pytorch-image-models", "token_count": 15 }
273
# Security Policy ## Reporting a Vulnerability To report a security vulnerability, please contact: security@huggingface.co ## Learning More About Security To learn more about running agents more securely, please see the [Secure Code Execution tutorial](docs/source/en/tutorials/secure_code_execution.mdx) which covers sandboxing with E2B, Docker, and WebAssembly. ### Secure Execution Options `smolagents` provides several options for secure code execution: 1. **E2B Sandbox**: Uses [E2B](https://e2b.dev/) to run code in a secure, isolated environment. 2. **Docker Sandbox**: Runs code in an isolated Docker container. 3. **WebAssembly Sandbox**: Executes Python code securely in a sandboxed WebAssembly environment using Pyodide and Deno's secure runtime. We recommend using one of these sandboxed execution options when running untrusted code.
smolagents/SECURITY.md/0
{ "file_path": "smolagents/SECURITY.md", "repo_id": "smolagents", "token_count": 221 }
274
# Agents <Tip warning={true}> Smolagents is an experimental API which is subject to change at any time. Results returned by the agents can vary as the APIs or underlying models are prone to change. </Tip> To learn more about agents and tools make sure to read the [introductory guide](../index). This page contains the API docs for the underlying classes. ## Agents Our agents inherit from [`MultiStepAgent`], which means they can act in multiple steps, each step consisting of one thought, then one tool call and execution. Read more in [this conceptual guide](../conceptual_guides/react). We provide two types of agents, based on the main [`Agent`] class. - [`CodeAgent`] writes its tool calls in Python code (this is the default). - [`ToolCallingAgent`] writes its tool calls in JSON. Both require arguments `model` and list of tools `tools` at initialization. ### Classes of agents [[autodoc]] MultiStepAgent [[autodoc]] CodeAgent [[autodoc]] ToolCallingAgent ### stream_to_gradio [[autodoc]] stream_to_gradio ### GradioUI > [!TIP] > You must have `gradio` installed to use the UI. Please run `pip install smolagents[gradio]` if it's not the case. [[autodoc]] GradioUI ## Prompts [[autodoc]] smolagents.agents.PromptTemplates [[autodoc]] smolagents.agents.PlanningPromptTemplate [[autodoc]] smolagents.agents.ManagedAgentPromptTemplate [[autodoc]] smolagents.agents.FinalAnswerPromptTemplate ## Memory Smolagents use memory to store information across multiple steps. [[autodoc]] smolagents.memory.AgentMemory ## Python code executors [[autodoc]] smolagents.local_python_executor.PythonExecutor ### Local Python executor [[autodoc]] smolagents.local_python_executor.LocalPythonExecutor ### Remote Python executors [[autodoc]] smolagents.remote_executors.RemotePythonExecutor #### E2BExecutor [[autodoc]] smolagents.remote_executors.E2BExecutor #### DockerExecutor [[autodoc]] smolagents.remote_executors.DockerExecutor #### WasmExecutor [[autodoc]] smolagents.remote_executors.WasmExecutor
smolagents/docs/source/en/reference/agents.md/0
{ "file_path": "smolagents/docs/source/en/reference/agents.md", "repo_id": "smolagents", "token_count": 620 }
275
# Agents - गाइडेड टूर [[open-in-colab]] इस गाइडेड विजिट में, आप सीखेंगे कि एक एजेंट कैसे बनाएं, इसे कैसे चलाएं, और अपने यूज-केस के लिए बेहतर काम करने के लिए इसे कैसे कस्टमाइज़ करें। ### अपना Agent बनाना एक मिनिमल एजेंट को इनिशियलाइज़ करने के लिए, आपको कम से कम इन दो आर्ग्यूमेंट्स की आवश्यकता है: - `model`, आपके एजेंट को पावर देने के लिए एक टेक्स्ट-जनरेशन मॉडल - क्योंकि एजेंट एक सिंपल LLM से अलग है, यह एक सिस्टम है जो LLM को अपने इंजन के रूप में उपयोग करता है। आप इनमें से कोई भी विकल्प उपयोग कर सकते हैं: - [`TransformersModel`] `transformers` पाइपलाइन को पहले से इनिशियलाइज़ करता है जो `transformers` का उपयोग करके आपकी लोकल मशीन पर इन्फरेंस चलाने के लिए होता है। - [`InferenceClientModel`] अंदर से `huggingface_hub.InferenceClient` का लाभ उठाता है। - [`LiteLLMModel`] आपको [LiteLLM](https://docs.litellm.ai/) के माध्यम से 100+ अलग-अलग मॉडल्स को कॉल करने देता है! - `tools`, `Tools` की एक लिस्ट जिसे एजेंट टास्क को हल करने के लिए उपयोग कर सकता है। यह एक खाली लिस्ट हो सकती है। आप ऑप्शनल आर्ग्यूमेंट `add_base_tools=True` को परिभाषित करके अपनी `tools` लिस्ट के ऊपर डिफ़ॉल्ट टूलबॉक्स भी जोड़ सकते हैं। एक बार जब आपके पास ये दो आर्ग्यूमेंट्स, `tools` और `model` हैं, तो आप एक एजेंट बना सकते हैं और इसे चला सकते हैं। आप कोई भी LLM उपयोग कर सकते हैं, या तो [Hugging Face API](https://huggingface.co/docs/api-inference/en/index), [transformers](https://github.com/huggingface/transformers/), [ollama](https://ollama.com/), या [LiteLLM](https://www.litellm.ai/) के माध्यम से। <hfoptions id="एक LLM चुनें"> <hfoption id="Hugging Face API"> Hugging Face API टोकन के बिना उपयोग करने के लिए मुफ्त है, लेकिन फिर इसमें रेट लिमिटेशन होगी। गेटेड मॉडल्स तक पहुंचने या PRO अकाउंट के साथ अपनी रेट लिमिट्स बढ़ाने के लिए, आपको एनवायरनमेंट वेरिएबल `HF_TOKEN` सेट करना होगा या `InferenceClientModel` के इनिशियलाइजेशन पर `token` वेरिएबल पास करना होगा। ```python from smolagents import CodeAgent, InferenceClientModel model_id = "meta-llama/Llama-3.3-70B-Instruct" model = InferenceClientModel(model_id=model_id, token="<YOUR_HUGGINGFACEHUB_API_TOKEN>") agent = CodeAgent(tools=[], model=model, add_base_tools=True) agent.run( "Could you give me the 118th number in the Fibonacci sequence?", ) ``` </hfoption> <hfoption id="Local Transformers Model"> ```python from smolagents import CodeAgent, TransformersModel model_id = "meta-llama/Llama-3.2-3B-Instruct" model = TransformersModel(model_id=model_id) agent = CodeAgent(tools=[], model=model, add_base_tools=True) agent.run( "Could you give me the 118th number in the Fibonacci sequence?", ) ``` </hfoption> <hfoption id="OpenAI या Anthropic API"> `LiteLLMModel` का उपयोग करने के लिए, आपको एनवायरनमेंट वेरिएबल `ANTHROPIC_API_KEY` या `OPENAI_API_KEY` सेट करना होगा, या इनिशियलाइजेशन पर `api_key` वेरिएबल पास करना होगा। ```python from smolagents import CodeAgent, LiteLLMModel model = LiteLLMModel(model_id="anthropic/claude-3-5-sonnet-latest", api_key="YOUR_ANTHROPIC_API_KEY") # Could use 'gpt-4o' agent = CodeAgent(tools=[], model=model, add_base_tools=True) agent.run( "Could you give me the 118th number in the Fibonacci sequence?", ) ``` </hfoption> <hfoption id="Ollama"> ```python from smolagents import CodeAgent, LiteLLMModel model = LiteLLMModel( model_id="ollama_chat/llama3.2", # This model is a bit weak for agentic behaviours though api_base="http://localhost:11434", # replace with 127.0.0.1:11434 or remote open-ai compatible server if necessary api_key="YOUR_API_KEY" # replace with API key if necessary num_ctx=8192 # ollama default is 2048 which will fail horribly. 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model. ) agent = CodeAgent(tools=[], model=model, add_base_tools=True) agent.run( "Could you give me the 118th number in the Fibonacci sequence?", ) ``` </hfoption> </hfoptions> #### CodeAgent और ToolCallingAgent [`CodeAgent`] हमारा डिफ़ॉल्ट एजेंट है। यह हर स्टेप पर पायथन कोड स्निपेट्स लिखेगा और एक्जीक्यूट करेगा। डिफ़ॉल्ट रूप से, एक्जीक्यूशन आपके लोकल एनवायरनमेंट में किया जाता है। यह सुरक्षित होना चाहिए क्योंकि केवल वही फ़ंक्शंस कॉल किए जा सकते हैं जो आपने प्रदान किए हैं (विशेष रूप से यदि यह केवल Hugging Face टूल्स हैं) और पूर्व-परिभाषित सुरक्षित फ़ंक्शंस जैसे `print` या `math` मॉड्यूल से फ़ंक्शंस, इसलिए आप पहले से ही सीमित हैं कि क्या एक्जीक्यूट किया जा सकता है। पायथन इंटरप्रेटर डिफ़ॉल्ट रूप से सेफ लिस्ट के बाहर इम्पोर्ट की अनुमति नहीं देता है, इसलिए सबसे स्पष्ट अटैक समस्या नहीं होनी चाहिए। आप अपने [`CodeAgent`] के इनिशियलाइजेशन पर आर्ग्यूमेंट `additional_authorized_imports` में स्ट्रिंग्स की लिस्ट के रूप में अतिरिक्त मॉड्यूल्स को अधिकृत कर सकते हैं। ```py model = InferenceClientModel() agent = CodeAgent(tools=[], model=model, additional_authorized_imports=['requests', 'bs4']) agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?") ``` > [!WARNING] > LLM आर्बिट्ररी कोड जनरेट कर सकता है जो फिर एक्जीक्यूट किया जाएगा: कोई असुरक्षित इम्पोर्ट न जोड़ें! एक्जीक्यूशन किसी भी कोड पर रुक जाएगा जो एक अवैध ऑपरेशन करने का प्रयास करता है या यदि एजेंट द्वारा जनरेट किए गए कोड में एक रेगुलर पायथन एरर है। आप [E2B कोड एक्जीक्यूटर](https://e2b.dev/docs#what-is-e2-b) या Docker का उपयोग लोकल पायथन इंटरप्रेटर के बजाय कर सकते हैं। E2B के लिए, पहले [`E2B_API_KEY` एनवायरनमेंट वेरिएबल सेट करें](https://e2b.dev/dashboard?tab=keys) और फिर एजेंट इनिशियलाइजेशन पर `executor_type="e2b"` पास करें। Docker के लिए, इनिशियलाइजेशन के दौरान `executor_type="docker"` पास करें। > [!TIP] > कोड एक्जीक्यूशन के बारे में और जानें [इस ट्यूटोरियल में](tutorials/secure_code_execution)। हम JSON-जैसे ब्लॉब्स के रूप में एक्शन लिखने के व्यापक रूप से उपयोग किए जाने वाले तरीके का भी समर्थन करते हैं: यह [`ToolCallingAgent`] है, यह बहुत कुछ [`CodeAgent`] की तरह ही काम करता है, बेशक `additional_authorized_imports` के बिना क्योंकि यह कोड एक्जीक्यूट नहीं करता। ```py from smolagents import ToolCallingAgent agent = ToolCallingAgent(tools=[], model=model) agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?") ``` ### एजेंट रन का निरीक्षण रन के बाद क्या हुआ यह जांचने के लिए यहाँ कुछ उपयोगी एट्रिब्यूट्स हैं: - `agent.logs` एजेंट के फाइन-ग्रेन्ड लॉग्स को स्टोर करता है। एजेंट के रन के हर स्टेप पर, सब कुछ एक डिक्शनरी में स्टोर किया जाता है जो फिर `agent.logs` में जोड़ा जाता है। - `agent.write_memory_to_messages()` चलाने से LLM के लिए एजेंट के लॉग्स की एक इनर मेमोरी बनती है, चैट मैसेज की लिस्ट के रूप में। यह मेथड लॉग के प्रत्येक स्टेप पर जाता है और केवल वही स्टोर करता है जिसमें यह एक मैसेज के रूप में रुचि रखता है: उदाहरण के लिए, यह सिस्टम प्रॉम्प्ट और टास्क को अलग-अलग मैसेज के रूप में सेव करेगा, फिर प्रत्येक स्टेप के लिए यह LLM आउटपुट को एक मैसेज के रूप में और टूल कॉल आउटपुट को दूसरे मैसेज के रूप में स्टोर करेगा। ## टूल्स टूल एक एटॉमिक फ़ंक्शन है जिसे एजेंट द्वारा उपयोग किया जाता है। LLM द्वारा उपयोग किए जाने के लिए, इसे कुछ एट्रिब्यूट्स की भी आवश्यकता होती है जो इसकी API बनाते हैं और LLM को यह बताने के लिए उपयोग किए जाएंगे कि इस टूल को कैसे कॉल करें: - एक नाम - एक विवरण - इनपुट प्रकार और विवरण - एक आउटपुट प्रकार आप उदाहरण के लिए [`PythonInterpreterTool`] को चेक कर सकते हैं: इसमें एक नाम, विवरण, इनपुट विवरण, एक आउटपुट प्रकार, और एक्शन करने के लिए एक `forward` मेथड है। जब एजेंट इनिशियलाइज़ किया जाता है, टूल एट्रिब्यूट्स का उपयोग एक टूल विवरण जनरेट करने के लिए किया जाता है जो एजेंट के सिस्टम प्रॉम्प्ट में बेक किया जाता है। यह एजेंट को बताता है कि वह कौन से टूल्स उपयोग कर सकता है और क्यों। ### डिफ़ॉल्ट टूलबॉक्स `smolagents` एजेंट्स को सशक्त बनाने के लिए एक डिफ़ॉल्ट टूलबॉक्स के साथ आता है, जिसे आप आर्ग्यूमेंट `add_base_tools=True` के साथ अपने एजेंट में इनिशियलाइजेशन पर जोड़ सकते हैं: - **DuckDuckGo वेब सर्च**: DuckDuckGo ब्राउज़र का उपयोग करके वेब सर्च करता है। - **पायथन कोड इंटरप्रेटर**: आपका LLM जनरेटेड पायथन कोड एक सुरक्षित एनवायरनमेंट में चलाता है। यह टूल [`ToolCallingAgent`] में केवल तभी जोड़ा जाएगा जब आप इसे `add_base_tools=True` के साथ इनिशियलाइज़ करते हैं, क्योंकि कोड-बेस्ड एजेंट पहले से ही नेटिव रूप से पायथन कोड एक्जीक्यूट कर सकता है - **ट्रांसक्राइबर**: Whisper-Turbo पर बनाया गया एक स्पीच-टू-टेक्स्ट पाइपलाइन जो ऑडियो को टेक्स्ट में ट्रांसक्राइब करता है। आप मैन्युअल रूप से एक टूल का उपयोग उसके आर्ग्यूमेंट्स के साथ कॉल करके कर सकते हैं। ```python from smolagents import WebSearchTool search_tool = WebSearchTool() print(search_tool("Who's the current president of Russia?")) ``` ### अपने कस्टम टूल बनाएं आप ऐसे उपयोग के मामलों के लिए अपने खुद के टूल बना सकते हैं जो Hugging Face के डिफ़ॉल्ट टूल्स द्वारा कवर नहीं किए गए हैं। उदाहरण के लिए, चलिए एक टूल बनाते हैं जो दिए गए कार्य (task) के लिए हब से सबसे अधिक डाउनलोड किए गए मॉडल को रिटर्न करता है। आप नीचे दिए गए कोड से शुरुआत करेंगे। ```python from huggingface_hub import list_models task = "text-classification" most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) print(most_downloaded_model.id) ``` यह कोड आसानी से टूल में बदला जा सकता है, बस इसे एक फ़ंक्शन में रैप करें और `tool` डेकोरेटर जोड़ें: यह टूल बनाने का एकमात्र तरीका नहीं है: आप इसे सीधे [`Tool`] का सबक्लास बनाकर भी परिभाषित कर सकते हैं, जो आपको अधिक लचीलापन प्रदान करता है, जैसे भारी क्लास एट्रिब्यूट्स को इनिशियलाइज़ करने की संभावना। चलो देखते हैं कि यह दोनों विकल्पों के लिए कैसे काम करता है: <hfoptions id="build-a-tool"> <hfoption id="@tool के साथ एक फ़ंक्शन को डेकोरेट करें"> ```py from smolagents import tool @tool def model_download_tool(task: str) -> str: """ This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. It returns the name of the checkpoint. Args: task: The task for which to get the download count. """ most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) return most_downloaded_model.id ``` फ़ंक्शन को चाहिए: - एक स्पष्ट नाम: नाम टूल के कार्य को स्पष्ट रूप से बताने वाला होना चाहिए ताकि इसे चलाने वाले LLM को आसानी हो। चूंकि यह टूल कार्य के लिए सबसे अधिक डाउनलोड किए गए मॉडल को लौटाता है, इसका नाम `model_download_tool` रखा गया है। - इनपुट और आउटपुट पर टाइप हिंट्स। - एक विवरण: इसमें 'Args:' भाग शामिल होना चाहिए, जिसमें प्रत्येक आर्ग्युमेंट का वर्णन (बिना टाइप संकेत के) किया गया हो। यह विवरण एक निर्देश मैनुअल की तरह होता है जो LLM को टूल चलाने में मदद करता है। इसे अनदेखा न करें। इन सभी तत्वों को एजेंट की सिस्टम प्रॉम्प्ट में स्वचालित रूप से शामिल किया जाएगा: इसलिए इन्हें यथासंभव स्पष्ट बनाने का प्रयास करें! > [!TIP] > यह परिभाषा प्रारूप `apply_chat_template` में उपयोग की गई टूल स्कीमा जैसा ही है, केवल अतिरिक्त `tool` डेकोरेटर जोड़ा गया है: हमारे टूल उपयोग API के बारे में अधिक पढ़ें [यहाँ](https://huggingface.co/blog/unified-tool-use#passing-tools-to-a-chat-template)। </hfoption> <hfoption id="सबक्लास टूल"> ```py from smolagents import Tool class ModelDownloadTool(Tool): name = "model_download_tool" description = "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. It returns the name of the checkpoint." inputs = {"task": {"type": "string", "description": "The task for which to get the download count."}} output_type = "string" def forward(self, task: str) -> str: most_downloaded_model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) return most_downloaded_model.id ``` सबक्लास को निम्नलिखित एट्रिब्यूट्स की आवश्यकता होती है: - एक स्पष्ट `name`: नाम टूल के कार्य को स्पष्ट रूप से बताने वाला होना चाहिए। - एक `description`: यह भी LLM के लिए निर्देश मैनुअल की तरह काम करता है। - इनपुट प्रकार और उनके विवरण। - आउटपुट प्रकार। इन सभी एट्रिब्यूट्स को एजेंट की सिस्टम प्रॉम्प्ट में स्वचालित रूप से शामिल किया जाएगा, इन्हें स्पष्ट और विस्तृत बनाएं। </hfoption> </hfoptions> आप सीधे अपने एजेंट को इनिशियलाइज़ कर सकते हैं: ```py from smolagents import CodeAgent, InferenceClientModel agent = CodeAgent(tools=[model_download_tool], model=InferenceClientModel()) agent.run( "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?" ) ``` लॉग्स इस प्रकार होंगे: ```text ╭──────────────────────────────────────── New run ─────────────────────────────────────────╮ │ │ │ Can you give me the name of the model that has the most downloads in the 'text-to-video' │ │ task on the Hugging Face Hub? │ │ │ ╰─ InferenceClientModel - Qwen/Qwen2.5-Coder-32B-Instruct ───────────────────────────────────────────╯ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 0 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ ╭─ Executing this code: ───────────────────────────────────────────────────────────────────╮ │ 1 model_name = model_download_tool(task="text-to-video") │ │ 2 print(model_name) │ ╰──────────────────────────────────────────────────────────────────────────────────────────╯ Execution logs: ByteDance/AnimateDiff-Lightning Out: None [Step 0: Duration 0.27 seconds| Input tokens: 2,069 | Output tokens: 60] ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ Step 1 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ ╭─ Executing this code: ───────────────────────────────────────────────────────────────────╮ │ 1 final_answer("ByteDance/AnimateDiff-Lightning") │ ╰──────────────────────────────────────────────────────────────────────────────────────────╯ Out - Final answer: ByteDance/AnimateDiff-Lightning [Step 1: Duration 0.10 seconds| Input tokens: 4,288 | Output tokens: 148] Out[20]: 'ByteDance/AnimateDiff-Lightning' ``` [!TIP] > टूल्स के बारे में अधिक पढ़ें [dedicated tutorial](./tutorials/tools#टूल-क्या-है-और-इसे-कैसे-बनाएं) में। ## मल्टी-एजेंट्स Microsoft के फ्रेमवर्क [Autogen](https://huggingface.co/papers/2308.08155) के साथ मल्टी-एजेंट सिस्टम्स की शुरुआत हुई। इस प्रकार के फ्रेमवर्क में, आपके कार्य को हल करने के लिए कई एजेंट्स एक साथ काम करते हैं, न कि केवल एक। यह अधिकांश बेंचमार्क्स पर बेहतर प्रदर्शन देता है। इसका कारण यह है कि कई कार्यों के लिए, एक सर्व-समावेशी प्रणाली के बजाय, आप उप-कार्यों पर विशेषज्ञता रखने वाली इकाइयों को पसंद करेंगे। इस तरह, अलग-अलग टूल सेट्स और मेमोरी वाले एजेंट्स के पास विशेषकरण की अधिक कुशलता होती है। उदाहरण के लिए, कोड उत्पन्न करने वाले एजेंट की मेमोरी को वेब सर्च एजेंट द्वारा देखे गए वेबपेजों की सभी सामग्री से क्यों भरें? इन्हें अलग रखना बेहतर है। आप `smolagents` का उपयोग करके आसानी से श्रेणीबद्ध मल्टी-एजेंट सिस्टम्स बना सकते हैं। ऐसा करने के लिए, एजेंट को [`ManagedAgent`] ऑब्जेक्ट में समाहित करें। यह ऑब्जेक्ट `agent`, `name`, और एक `description` जैसे तर्कों की आवश्यकता होती है, जो फिर मैनेजर एजेंट की सिस्टम प्रॉम्प्ट में एम्बेड किया जाता है यहां एक एजेंट बनाने का उदाहरण दिया गया है जो हमारे [`WebSearchTool`] का उपयोग करके एक विशिष्ट वेब खोज एजेंट को प्रबंधित करता है। ```py from smolagents import CodeAgent, InferenceClientModel, WebSearchTool, ManagedAgent model = InferenceClientModel() web_agent = CodeAgent(tools=[WebSearchTool()], model=model) managed_web_agent = ManagedAgent( agent=web_agent, name="web_search", description="Runs web searches for you. Give it your query as an argument." ) manager_agent = CodeAgent( tools=[], model=model, managed_agents=[managed_web_agent] ) manager_agent.run("Who is the CEO of Hugging Face?") ``` > [!TIP] > कुशल मल्टी-एजेंट इंप्लीमेंटेशन का एक विस्तृत उदाहरण देखने के लिए, [कैसे हमने अपने मल्टी-एजेंट सिस्टम को GAIA लीडरबोर्ड के शीर्ष पर पहुंचाया](https://huggingface.co/blog/beating-gaia) पर जाएं। ## अपने एजेंट से बात करें और उसके विचारों को एक शानदार Gradio इंटरफेस में विज़ुअलाइज़ करें आप `GradioUI` का उपयोग करके अपने एजेंट को इंटरैक्टिव तरीके से कार्य सौंप सकते हैं और उसके सोचने और निष्पादन की प्रक्रिया को देख सकते हैं। नीचे एक उदाहरण दिया गया है: ```py from smolagents import ( load_tool, CodeAgent, InferenceClientModel, GradioUI ) # Import tool from Hub image_generation_tool = load_tool("m-ric/text-to-image", trust_remote_code=True) model = InferenceClientModel(model_id=model_id) # Initialize the agent with the image generation tool agent = CodeAgent(tools=[image_generation_tool], model=model) GradioUI(agent).launch() ``` अंदरूनी तौर पर, जब यूजर एक नया उत्तर टाइप करता है, तो एजेंट को `agent.run(user_request, reset=False)` के साथ लॉन्च किया जाता है। यहाँ `reset=False` फ्लैग का मतलब है कि एजेंट की मेमोरी इस नए कार्य को लॉन्च करने से पहले क्लियर नहीं होती, जिससे बातचीत जारी रहती है। आप इस `reset=False` आर्ग्युमेंट का उपयोग किसी भी अन्य एजेंटिक एप्लिकेशन में बातचीत जारी रखने के लिए कर सकते हैं। ## अगले कदम अधिक गहन उपयोग के लिए, आप हमारे ट्यूटोरियल्स देख सकते हैं: - [हमारे कोड एजेंट्स कैसे काम करते हैं इसका विवरण](./tutorials/secure_code_execution) - [अच्छे एजेंट्स बनाने के लिए यह गाइड](./tutorials/building_good_agents) - [टूल उपयोग के लिए इन-डेप्थ गाइड ](./tutorials/building_good_agents)।
smolagents/docs/source/hi/guided_tour.md/0
{ "file_path": "smolagents/docs/source/hi/guided_tour.md", "repo_id": "smolagents", "token_count": 16396 }
276
# 多步骤 agent 是如何工作的? ReAct 框架([Yao et al., 2022](https://huggingface.co/papers/2210.03629))是目前构建 agent 的主要方法。 该名称基于两个词的组合:"Reason" (推理)和 "Act" (行动)。实际上,遵循此架构的 agent 将根据需要尽可能多的步骤来解决其任务,每个步骤包括一个推理步骤,然后是一个行动步骤,在该步骤中,它制定工具调用,使其更接近解决手头的任务。 ReAct 过程涉及保留过去步骤的记忆。 > [!TIP] > 阅读 [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) 博客文章以了解更多关于多步 agent 的信息。 以下是其工作原理的视频概述: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif" /> </div> ![ReAct agent 的框架](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/open-source-llms-as-agents/ReAct.png) 我们实现了两个版本的 ToolCallingAgent: - [`ToolCallingAgent`] 在其输出中生成 JSON 格式的工具调用。 - [`CodeAgent`] 是一种新型的 ToolCallingAgent,它生成代码块形式的工具调用,这对于具有强大编码性能的 LLM 非常有效。
smolagents/docs/source/zh/conceptual_guides/react.md/0
{ "file_path": "smolagents/docs/source/zh/conceptual_guides/react.md", "repo_id": "smolagents", "token_count": 859 }
277
from smolagents import ( CodeAgent, InferenceClientModel, LiteLLMModel, OpenAIServerModel, ToolCallingAgent, TransformersModel, tool, ) # Choose which inference type to use! available_inferences = ["inference_client", "transformers", "ollama", "litellm", "openai"] chosen_inference = "inference_client" print(f"Chose model: '{chosen_inference}'") if chosen_inference == "inference_client": model = InferenceClientModel(model_id="meta-llama/Llama-3.3-70B-Instruct", provider="nebius") elif chosen_inference == "transformers": model = TransformersModel(model_id="HuggingFaceTB/SmolLM2-1.7B-Instruct", device_map="auto", max_new_tokens=1000) elif chosen_inference == "ollama": model = LiteLLMModel( model_id="ollama_chat/llama3.2", api_base="http://localhost:11434", # replace with remote open-ai compatible server if necessary api_key="your-api-key", # replace with API key if necessary num_ctx=8192, # ollama default is 2048 which will often fail horribly. 8192 works for easy tasks, more is better. Check https://huggingface.co/spaces/NyxKrage/LLM-Model-VRAM-Calculator to calculate how much VRAM this will need for the selected model. ) elif chosen_inference == "litellm": # For anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-latest' model = LiteLLMModel(model_id="gpt-4o") elif chosen_inference == "openai": # For anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-latest' model = OpenAIServerModel(model_id="gpt-4o") @tool def get_weather(location: str, celsius: bool | None = False) -> str: """ Get weather in the next days at given location. Secretly this tool does not care about the location, it hates the weather everywhere. Args: location: the location celsius: the temperature """ return "The weather is UNGODLY with torrential rains and temperatures below -10°C" agent = ToolCallingAgent(tools=[get_weather], model=model, verbosity_level=2) print("ToolCallingAgent:", agent.run("What's the weather like in Paris?")) agent = CodeAgent(tools=[get_weather], model=model, verbosity_level=2, stream_outputs=True) print("CodeAgent:", agent.run("What's the weather like in Paris?"))
smolagents/examples/agent_from_any_llm.py/0
{ "file_path": "smolagents/examples/agent_from_any_llm.py", "repo_id": "smolagents", "token_count": 803 }
278
# This is copied from Magentic-one's great repo: https://github.com/microsoft/autogen/blob/v0.4.4/python/packages/autogen-magentic-one/src/autogen_magentic_one/markdown_browser/mdconvert.py # Thanks to Microsoft researchers for open-sourcing this! # type: ignore import base64 import copy import html import json import mimetypes import os import re import shutil import subprocess import sys import tempfile import traceback import zipfile from typing import Any from urllib.parse import parse_qs, quote, unquote, urlparse, urlunparse import mammoth import markdownify import pandas as pd import pdfminer import pdfminer.high_level import pptx # File-format detection import puremagic import pydub import requests import speech_recognition as sr from bs4 import BeautifulSoup from youtube_transcript_api import YouTubeTranscriptApi from youtube_transcript_api.formatters import SRTFormatter class _CustomMarkdownify(markdownify.MarkdownConverter): """ A custom version of markdownify's MarkdownConverter. Changes include: - Altering the default heading style to use '#', '##', etc. - Removing javascript hyperlinks. - Truncating images with large data:uri sources. - Ensuring URIs are properly escaped, and do not conflict with Markdown syntax """ def __init__(self, **options: Any): options["heading_style"] = options.get("heading_style", markdownify.ATX) # Explicitly cast options to the expected type if necessary super().__init__(**options) def convert_hn(self, n: int, el: Any, text: str, convert_as_inline: bool) -> str: """Same as usual, but be sure to start with a new line""" if not convert_as_inline: if not re.search(r"^\n", text): return "\n" + super().convert_hn(n, el, text, convert_as_inline) # type: ignore return super().convert_hn(n, el, text, convert_as_inline) # type: ignore def convert_a(self, el: Any, text: str, convert_as_inline: bool): """Same as usual converter, but removes Javascript links and escapes URIs.""" prefix, suffix, text = markdownify.chomp(text) # type: ignore if not text: return "" href = el.get("href") title = el.get("title") # Escape URIs and skip non-http or file schemes if href: try: parsed_url = urlparse(href) # type: ignore if parsed_url.scheme and parsed_url.scheme.lower() not in ["http", "https", "file"]: # type: ignore return "%s%s%s" % (prefix, text, suffix) href = urlunparse(parsed_url._replace(path=quote(unquote(parsed_url.path)))) # type: ignore except ValueError: # It's not clear if this ever gets thrown return "%s%s%s" % (prefix, text, suffix) # For the replacement see #29: text nodes underscores are escaped if ( self.options["autolinks"] and text.replace(r"\_", "_") == href and not title and not self.options["default_title"] ): # Shortcut syntax return "<%s>" % href if self.options["default_title"] and not title: title = href title_part = ' "%s"' % title.replace('"', r"\"") if title else "" return "%s[%s](%s%s)%s" % (prefix, text, href, title_part, suffix) if href else text def convert_img(self, el: Any, text: str, convert_as_inline: bool) -> str: """Same as usual converter, but removes data URIs""" alt = el.attrs.get("alt", None) or "" src = el.attrs.get("src", None) or "" title = el.attrs.get("title", None) or "" title_part = ' "%s"' % title.replace('"', r"\"") if title else "" if convert_as_inline and el.parent.name not in self.options["keep_inline_images_in"]: return alt # Remove dataURIs if src.startswith("data:"): src = src.split(",")[0] + "..." return "![%s](%s%s)" % (alt, src, title_part) def convert_soup(self, soup: Any) -> str: return super().convert_soup(soup) # type: ignore class DocumentConverterResult: """The result of converting a document to text.""" def __init__(self, title: str | None = None, text_content: str = ""): self.title: str | None = title self.text_content: str = text_content class DocumentConverter: """Abstract superclass of all DocumentConverters.""" def convert(self, local_path: str, **kwargs: Any) -> None | DocumentConverterResult: raise NotImplementedError() class PlainTextConverter(DocumentConverter): """Anything with content type text/plain""" def convert(self, local_path: str, **kwargs: Any) -> None | DocumentConverterResult: # Guess the content type from any file extension that might be around content_type, _ = mimetypes.guess_type("__placeholder" + kwargs.get("file_extension", "")) # Only accept text files if content_type is None: return None # elif "text/" not in content_type.lower(): # return None text_content = "" with open(local_path, "rt", encoding="utf-8") as fh: text_content = fh.read() return DocumentConverterResult( title=None, text_content=text_content, ) class HtmlConverter(DocumentConverter): """Anything with content type text/html""" def convert(self, local_path: str, **kwargs: Any) -> None | DocumentConverterResult: # Bail if not html extension = kwargs.get("file_extension", "") if extension.lower() not in [".html", ".htm"]: return None result = None with open(local_path, "rt", encoding="utf-8") as fh: result = self._convert(fh.read()) return result def _convert(self, html_content: str) -> None | DocumentConverterResult: """Helper function that converts and HTML string.""" # Parse the string soup = BeautifulSoup(html_content, "html.parser") # Remove javascript and style blocks for script in soup(["script", "style"]): script.extract() # Print only the main content body_elm = soup.find("body") webpage_text = "" if body_elm: webpage_text = _CustomMarkdownify().convert_soup(body_elm) else: webpage_text = _CustomMarkdownify().convert_soup(soup) assert isinstance(webpage_text, str) return DocumentConverterResult( title=None if soup.title is None else soup.title.string, text_content=webpage_text ) class WikipediaConverter(DocumentConverter): """Handle Wikipedia pages separately, focusing only on the main document content.""" def convert(self, local_path: str, **kwargs: Any) -> None | DocumentConverterResult: # Bail if not Wikipedia extension = kwargs.get("file_extension", "") if extension.lower() not in [".html", ".htm"]: return None url = kwargs.get("url", "") if not re.search(r"^https?:\/\/[a-zA-Z]{2,3}\.wikipedia.org\/", url): return None # Parse the file soup = None with open(local_path, "rt", encoding="utf-8") as fh: soup = BeautifulSoup(fh.read(), "html.parser") # Remove javascript and style blocks for script in soup(["script", "style"]): script.extract() # Print only the main content body_elm = soup.find("div", {"id": "mw-content-text"}) title_elm = soup.find("span", {"class": "mw-page-title-main"}) webpage_text = "" main_title = None if soup.title is None else soup.title.string if body_elm: # What's the title if title_elm and len(title_elm) > 0: main_title = title_elm.string # type: ignore assert isinstance(main_title, str) # Convert the page webpage_text = f"# {main_title}\n\n" + _CustomMarkdownify().convert_soup(body_elm) else: webpage_text = _CustomMarkdownify().convert_soup(soup) return DocumentConverterResult( title=main_title, text_content=webpage_text, ) class YouTubeConverter(DocumentConverter): """Handle YouTube specially, focusing on the video title, description, and transcript.""" def convert(self, local_path: str, **kwargs: Any) -> None | DocumentConverterResult: # Bail if not YouTube extension = kwargs.get("file_extension", "") if extension.lower() not in [".html", ".htm"]: return None url = kwargs.get("url", "") if not url.startswith("https://www.youtube.com/watch?"): return None # Parse the file soup = None with open(local_path, "rt", encoding="utf-8") as fh: soup = BeautifulSoup(fh.read(), "html.parser") # Read the meta tags assert soup.title is not None and soup.title.string is not None metadata: dict[str, str] = {"title": soup.title.string} for meta in soup(["meta"]): for a in meta.attrs: if a in ["itemprop", "property", "name"]: metadata[meta[a]] = meta.get("content", "") break # We can also try to read the full description. This is more prone to breaking, since it reaches into the page implementation try: for script in soup(["script"]): content = script.text if "ytInitialData" in content: lines = re.split(r"\r?\n", content) obj_start = lines[0].find("{") obj_end = lines[0].rfind("}") if obj_start >= 0 and obj_end >= 0: data = json.loads(lines[0][obj_start : obj_end + 1]) attrdesc = self._findKey(data, "attributedDescriptionBodyText") # type: ignore if attrdesc: metadata["description"] = str(attrdesc["content"]) break except Exception: pass # Start preparing the page webpage_text = "# YouTube\n" title = self._get(metadata, ["title", "og:title", "name"]) # type: ignore assert isinstance(title, str) if title: webpage_text += f"\n## {title}\n" stats = "" views = self._get(metadata, ["interactionCount"]) # type: ignore if views: stats += f"- **Views:** {views}\n" keywords = self._get(metadata, ["keywords"]) # type: ignore if keywords: stats += f"- **Keywords:** {keywords}\n" runtime = self._get(metadata, ["duration"]) # type: ignore if runtime: stats += f"- **Runtime:** {runtime}\n" if len(stats) > 0: webpage_text += f"\n### Video Metadata\n{stats}\n" description = self._get(metadata, ["description", "og:description"]) # type: ignore if description: webpage_text += f"\n### Description\n{description}\n" transcript_text = "" parsed_url = urlparse(url) # type: ignore params = parse_qs(parsed_url.query) # type: ignore if "v" in params: assert isinstance(params["v"][0], str) video_id = str(params["v"][0]) try: # Must be a single transcript. transcript = YouTubeTranscriptApi.get_transcript(video_id) # type: ignore # transcript_text = " ".join([part["text"] for part in transcript]) # type: ignore # Alternative formatting: transcript_text = SRTFormatter().format_transcript(transcript) except Exception: pass if transcript_text: webpage_text += f"\n### Transcript\n{transcript_text}\n" title = title if title else soup.title.string assert isinstance(title, str) return DocumentConverterResult( title=title, text_content=webpage_text, ) def _get(self, metadata: dict[str, str], keys: list[str], default: str | None = None) -> str | None: for k in keys: if k in metadata: return metadata[k] return default def _findKey(self, json: Any, key: str) -> str | None: # TODO: Fix json type if isinstance(json, list): for elm in json: ret = self._findKey(elm, key) if ret is not None: return ret elif isinstance(json, dict): for k in json: if k == key: return json[k] else: ret = self._findKey(json[k], key) if ret is not None: return ret return None class PdfConverter(DocumentConverter): """ Converts PDFs to Markdown. Most style information is ignored, so the results are essentially plain-text. """ def convert(self, local_path, **kwargs) -> None | DocumentConverterResult: # Bail if not a PDF extension = kwargs.get("file_extension", "") if extension.lower() != ".pdf": return None return DocumentConverterResult( title=None, text_content=pdfminer.high_level.extract_text(local_path), ) class DocxConverter(HtmlConverter): """ Converts DOCX files to Markdown. Style information (e.g.m headings) and tables are preserved where possible. """ def convert(self, local_path, **kwargs) -> None | DocumentConverterResult: # Bail if not a DOCX extension = kwargs.get("file_extension", "") if extension.lower() != ".docx": return None result = None with open(local_path, "rb") as docx_file: result = mammoth.convert_to_html(docx_file) html_content = result.value result = self._convert(html_content) return result class XlsxConverter(HtmlConverter): """ Converts XLSX files to Markdown, with each sheet presented as a separate Markdown table. """ def convert(self, local_path, **kwargs) -> None | DocumentConverterResult: # Bail if not a XLSX extension = kwargs.get("file_extension", "") if extension.lower() not in [".xlsx", ".xls"]: return None sheets = pd.read_excel(local_path, sheet_name=None) md_content = "" for s in sheets: md_content += f"## {s}\n" html_content = sheets[s].to_html(index=False) md_content += self._convert(html_content).text_content.strip() + "\n\n" return DocumentConverterResult( title=None, text_content=md_content.strip(), ) class PptxConverter(HtmlConverter): """ Converts PPTX files to Markdown. Supports heading, tables and images with alt text. """ def convert(self, local_path, **kwargs) -> None | DocumentConverterResult: # Bail if not a PPTX extension = kwargs.get("file_extension", "") if extension.lower() != ".pptx": return None md_content = "" presentation = pptx.Presentation(local_path) slide_num = 0 for slide in presentation.slides: slide_num += 1 md_content += f"\n\n<!-- Slide number: {slide_num} -->\n" title = slide.shapes.title for shape in slide.shapes: # Pictures if self._is_picture(shape): # https://github.com/scanny/python-pptx/pull/512#issuecomment-1713100069 alt_text = "" try: alt_text = shape._element._nvXxPr.cNvPr.attrib.get("descr", "") except Exception: pass # A placeholder name filename = re.sub(r"\W", "", shape.name) + ".jpg" md_content += "\n![" + (alt_text if alt_text else shape.name) + "](" + filename + ")\n" # Tables if self._is_table(shape): html_table = "<html><body><table>" first_row = True for row in shape.table.rows: html_table += "<tr>" for cell in row.cells: if first_row: html_table += "<th>" + html.escape(cell.text) + "</th>" else: html_table += "<td>" + html.escape(cell.text) + "</td>" html_table += "</tr>" first_row = False html_table += "</table></body></html>" md_content += "\n" + self._convert(html_table).text_content.strip() + "\n" # Text areas elif shape.has_text_frame: if shape == title: md_content += "# " + shape.text.lstrip() + "\n" else: md_content += shape.text + "\n" md_content = md_content.strip() if slide.has_notes_slide: md_content += "\n\n### Notes:\n" notes_frame = slide.notes_slide.notes_text_frame if notes_frame is not None: md_content += notes_frame.text md_content = md_content.strip() return DocumentConverterResult( title=None, text_content=md_content.strip(), ) def _is_picture(self, shape): if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.PICTURE: return True if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.PLACEHOLDER: if hasattr(shape, "image"): return True return False def _is_table(self, shape): if shape.shape_type == pptx.enum.shapes.MSO_SHAPE_TYPE.TABLE: return True return False class MediaConverter(DocumentConverter): """ Abstract class for multi-modal media (e.g., images and audio) """ def _get_metadata(self, local_path): exiftool = shutil.which("exiftool") if not exiftool: return None else: try: result = subprocess.run([exiftool, "-json", local_path], capture_output=True, text=True).stdout return json.loads(result)[0] except Exception: return None class WavConverter(MediaConverter): """ Converts WAV files to markdown via extraction of metadata (if `exiftool` is installed), and speech transcription (if `speech_recognition` is installed). """ def convert(self, local_path, **kwargs) -> None | DocumentConverterResult: # Bail if not a XLSX extension = kwargs.get("file_extension", "") if extension.lower() != ".wav": return None md_content = "" # Add metadata metadata = self._get_metadata(local_path) if metadata: for f in [ "Title", "Artist", "Author", "Band", "Album", "Genre", "Track", "DateTimeOriginal", "CreateDate", "Duration", ]: if f in metadata: md_content += f"{f}: {metadata[f]}\n" # Transcribe try: transcript = self._transcribe_audio(local_path) md_content += "\n\n### Audio Transcript:\n" + ("[No speech detected]" if transcript == "" else transcript) except Exception: md_content += "\n\n### Audio Transcript:\nError. Could not transcribe this audio." return DocumentConverterResult( title=None, text_content=md_content.strip(), ) def _transcribe_audio(self, local_path) -> str: recognizer = sr.Recognizer() with sr.AudioFile(local_path) as source: audio = recognizer.record(source) return recognizer.recognize_google(audio).strip() class Mp3Converter(WavConverter): """ Converts MP3 and M4A files to markdown via extraction of metadata (if `exiftool` is installed), and speech transcription (if `speech_recognition` AND `pydub` are installed). """ def convert(self, local_path, **kwargs) -> None | DocumentConverterResult: # Bail if not a MP3 extension = kwargs.get("file_extension", "") if extension.lower() not in [".mp3", ".m4a"]: return None md_content = "" # Add metadata metadata = self._get_metadata(local_path) if metadata: for f in [ "Title", "Artist", "Author", "Band", "Album", "Genre", "Track", "DateTimeOriginal", "CreateDate", "Duration", ]: if f in metadata: md_content += f"{f}: {metadata[f]}\n" # Transcribe handle, temp_path = tempfile.mkstemp(suffix=".wav") os.close(handle) try: if extension.lower() == ".mp3": sound = pydub.AudioSegment.from_mp3(local_path) else: sound = pydub.AudioSegment.from_file(local_path, format="m4a") sound.export(temp_path, format="wav") _args = dict() _args.update(kwargs) _args["file_extension"] = ".wav" try: transcript = super()._transcribe_audio(temp_path).strip() md_content += "\n\n### Audio Transcript:\n" + ( "[No speech detected]" if transcript == "" else transcript ) except Exception: md_content += "\n\n### Audio Transcript:\nError. Could not transcribe this audio." finally: os.unlink(temp_path) # Return the result return DocumentConverterResult( title=None, text_content=md_content.strip(), ) class ZipConverter(DocumentConverter): """ Extracts ZIP files to a permanent local directory and returns a listing of extracted files. """ def __init__(self, extract_dir: str = "downloads"): """ Initialize with path to extraction directory. Args: extract_dir: The directory where files will be extracted. Defaults to "downloads" """ self.extract_dir = extract_dir # Create the extraction directory if it doesn't exist os.makedirs(self.extract_dir, exist_ok=True) def convert(self, local_path: str, **kwargs: Any) -> None | DocumentConverterResult: # Bail if not a ZIP file extension = kwargs.get("file_extension", "") if extension.lower() != ".zip": return None # Verify it's actually a ZIP file if not zipfile.is_zipfile(local_path): return None # Extract all files and build list extracted_files = [] with zipfile.ZipFile(local_path, "r") as zip_ref: # Extract all files zip_ref.extractall(self.extract_dir) # Get list of all files for file_path in zip_ref.namelist(): # Skip directories if not file_path.endswith("/"): extracted_files.append(self.extract_dir + "/" + file_path) # Sort files for consistent output extracted_files.sort() # Build the markdown content md_content = "Downloaded the following files:\n" for file in extracted_files: md_content += f"* {file}\n" return DocumentConverterResult(title="Extracted Files", text_content=md_content.strip()) class ImageConverter(MediaConverter): """ Converts images to markdown via extraction of metadata (if `exiftool` is installed), OCR (if `easyocr` is installed), and description via a multimodal LLM (if an mlm_client is configured). """ def convert(self, local_path, **kwargs) -> None | DocumentConverterResult: # Bail if not a XLSX extension = kwargs.get("file_extension", "") if extension.lower() not in [".jpg", ".jpeg", ".png"]: return None md_content = "" # Add metadata metadata = self._get_metadata(local_path) if metadata: for f in [ "ImageSize", "Title", "Caption", "Description", "Keywords", "Artist", "Author", "DateTimeOriginal", "CreateDate", "GPSPosition", ]: if f in metadata: md_content += f"{f}: {metadata[f]}\n" # Try describing the image with GPTV mlm_client = kwargs.get("mlm_client") mlm_model = kwargs.get("mlm_model") if mlm_client is not None and mlm_model is not None: md_content += ( "\n# Description:\n" + self._get_mlm_description( local_path, extension, mlm_client, mlm_model, prompt=kwargs.get("mlm_prompt") ).strip() + "\n" ) return DocumentConverterResult( title=None, text_content=md_content, ) def _get_mlm_description(self, local_path, extension, client, model, prompt=None): if prompt is None or prompt.strip() == "": prompt = "Write a detailed caption for this image." sys.stderr.write(f"MLM Prompt:\n{prompt}\n") data_uri = "" with open(local_path, "rb") as image_file: content_type, encoding = mimetypes.guess_type("_dummy" + extension) if content_type is None: content_type = "image/jpeg" image_base64 = base64.b64encode(image_file.read()).decode("utf-8") data_uri = f"data:{content_type};base64,{image_base64}" messages = [ { "role": "user", "content": [ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": { "url": data_uri, }, }, ], } ] response = client.chat.completions.create(model=model, messages=messages) return response.choices[0].message.content class FileConversionException(Exception): pass class UnsupportedFormatException(Exception): pass class MarkdownConverter: """(In preview) An extremely simple text-based document reader, suitable for LLM use. This reader will convert common file-types or webpages to Markdown.""" def __init__( self, requests_session: requests.Session | None = None, mlm_client: Any | None = None, mlm_model: Any | None = None, ): if requests_session is None: self._requests_session = requests.Session() else: self._requests_session = requests_session self._mlm_client = mlm_client self._mlm_model = mlm_model self._page_converters: list[DocumentConverter] = [] # Register converters for successful browsing operations # Later registrations are tried first / take higher priority than earlier registrations # To this end, the most specific converters should appear below the most generic converters self.register_page_converter(PlainTextConverter()) self.register_page_converter(HtmlConverter()) self.register_page_converter(WikipediaConverter()) self.register_page_converter(YouTubeConverter()) self.register_page_converter(DocxConverter()) self.register_page_converter(XlsxConverter()) self.register_page_converter(PptxConverter()) self.register_page_converter(WavConverter()) self.register_page_converter(Mp3Converter()) self.register_page_converter(ImageConverter()) self.register_page_converter(ZipConverter()) self.register_page_converter(PdfConverter()) def convert( self, source: str | requests.Response, **kwargs: Any ) -> DocumentConverterResult: # TODO: deal with kwargs """ Args: - source: can be a string representing a path or url, or a requests.response object - extension: specifies the file extension to use when interpreting the file. If None, infer from source (path, uri, content-type, etc.) """ # Local path or url if isinstance(source, str): if source.startswith("http://") or source.startswith("https://") or source.startswith("file://"): return self.convert_url(source, **kwargs) else: return self.convert_local(source, **kwargs) # Request response elif isinstance(source, requests.Response): return self.convert_response(source, **kwargs) def convert_local(self, path: str, **kwargs: Any) -> DocumentConverterResult: # TODO: deal with kwargs # Prepare a list of extensions to try (in order of priority) ext = kwargs.get("file_extension") extensions = [ext] if ext is not None else [] # Get extension alternatives from the path and puremagic base, ext = os.path.splitext(path) self._append_ext(extensions, ext) self._append_ext(extensions, self._guess_ext_magic(path)) # Convert return self._convert(path, extensions, **kwargs) # TODO what should stream's type be? def convert_stream(self, stream: Any, **kwargs: Any) -> DocumentConverterResult: # TODO: deal with kwargs # Prepare a list of extensions to try (in order of priority) ext = kwargs.get("file_extension") extensions = [ext] if ext is not None else [] # Save the file locally to a temporary file. It will be deleted before this method exits handle, temp_path = tempfile.mkstemp() fh = os.fdopen(handle, "wb") result = None try: # Write to the temporary file content = stream.read() if isinstance(content, str): fh.write(content.encode("utf-8")) else: fh.write(content) fh.close() # Use puremagic to check for more extension options self._append_ext(extensions, self._guess_ext_magic(temp_path)) # Convert result = self._convert(temp_path, extensions, **kwargs) # Clean up finally: try: fh.close() except Exception: pass os.unlink(temp_path) return result def convert_url(self, url: str, **kwargs: Any) -> DocumentConverterResult: # TODO: fix kwargs type # Send a HTTP request to the URL user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0" response = self._requests_session.get(url, stream=True, headers={"User-Agent": user_agent}) response.raise_for_status() return self.convert_response(response, **kwargs) def convert_response( self, response: requests.Response, **kwargs: Any ) -> DocumentConverterResult: # TODO fix kwargs type # Prepare a list of extensions to try (in order of priority) ext = kwargs.get("file_extension") extensions = [ext] if ext is not None else [] # Guess from the mimetype content_type = response.headers.get("content-type", "").split(";")[0] self._append_ext(extensions, mimetypes.guess_extension(content_type)) # Read the content disposition if there is one content_disposition = response.headers.get("content-disposition", "") m = re.search(r"filename=([^;]+)", content_disposition) if m: base, ext = os.path.splitext(m.group(1).strip("\"'")) self._append_ext(extensions, ext) # Read from the extension from the path base, ext = os.path.splitext(urlparse(response.url).path) self._append_ext(extensions, ext) # Save the file locally to a temporary file. It will be deleted before this method exits handle, temp_path = tempfile.mkstemp() fh = os.fdopen(handle, "wb") result = None try: # Download the file for chunk in response.iter_content(chunk_size=512): fh.write(chunk) fh.close() # Use puremagic to check for more extension options self._append_ext(extensions, self._guess_ext_magic(temp_path)) # Convert result = self._convert(temp_path, extensions, url=response.url) except Exception as e: print(f"Error in converting: {e}") # Clean up finally: try: fh.close() except Exception: pass os.unlink(temp_path) return result def _convert(self, local_path: str, extensions: list[str | None], **kwargs) -> DocumentConverterResult: error_trace = "" for ext in extensions + [None]: # Try last with no extension for converter in self._page_converters: _kwargs = copy.deepcopy(kwargs) # Overwrite file_extension appropriately if ext is None: if "file_extension" in _kwargs: del _kwargs["file_extension"] else: _kwargs.update({"file_extension": ext}) # Copy any additional global options if "mlm_client" not in _kwargs and self._mlm_client is not None: _kwargs["mlm_client"] = self._mlm_client if "mlm_model" not in _kwargs and self._mlm_model is not None: _kwargs["mlm_model"] = self._mlm_model # If we hit an error log it and keep trying try: res = converter.convert(local_path, **_kwargs) except Exception: error_trace = ("\n\n" + traceback.format_exc()).strip() if res is not None: # Normalize the content res.text_content = "\n".join([line.rstrip() for line in re.split(r"\r?\n", res.text_content)]) res.text_content = re.sub(r"\n{3,}", "\n\n", res.text_content) # Todo return res # If we got this far without success, report any exceptions if len(error_trace) > 0: raise FileConversionException( f"Could not convert '{local_path}' to Markdown. File type was recognized as {extensions}. While converting the file, the following error was encountered:\n\n{error_trace}" ) # Nothing can handle it! raise UnsupportedFormatException( f"Could not convert '{local_path}' to Markdown. The formats {extensions} are not supported." ) def _append_ext(self, extensions, ext): """Append a unique non-None, non-empty extension to a list of extensions.""" if ext is None: return ext = ext.strip() if ext == "": return # if ext not in extensions: if True: extensions.append(ext) def _guess_ext_magic(self, path): """Use puremagic (a Python implementation of libmagic) to guess a file's extension based on the first few bytes.""" # Use puremagic to guess try: guesses = puremagic.magic_file(path) if len(guesses) > 0: ext = guesses[0].extension.strip() if len(ext) > 0: return ext except FileNotFoundError: pass except IsADirectoryError: pass except PermissionError: pass return None def register_page_converter(self, converter: DocumentConverter) -> None: """Register a page text converter.""" self._page_converters.insert(0, converter)
smolagents/examples/open_deep_research/scripts/mdconvert.py/0
{ "file_path": "smolagents/examples/open_deep_research/scripts/mdconvert.py", "repo_id": "smolagents", "token_count": 16913 }
279
# How to run with uv: # uv run structured_output_tool.py # # Modify the smolagents dependency to point to the local smolagents repo or # remove `@ file:///<path-to-smolagents>` # # /// script # requires-python = ">=3.10" # dependencies = [ # "smolagents[mcp,litellm] @ file:///<path-to-smolagents>", # "pydantic", # ] # /// from textwrap import dedent from mcp import StdioServerParameters from smolagents import CodeAgent, InferenceClientModel, LiteLLMModel, MCPClient # noqa: F401 def weather_server_script() -> str: """Return an inline MCP server script that exposes a weather tool.""" return dedent( ''' from pydantic import BaseModel, Field from mcp.server.fastmcp import FastMCP mcp = FastMCP("Weather Service") class WeatherInfo(BaseModel): location: str = Field(description="The location name") temperature: float = Field(description="Temperature in Celsius") conditions: str = Field(description="Weather conditions") humidity: int = Field(description="Humidity percentage", ge=0, le=100) @mcp.tool( name="get_weather_info", description="Get weather information for a location as structured data.", ) def get_weather_info(city: str) -> WeatherInfo: """Get weather information for a city.""" return WeatherInfo( location=city, temperature=22.5, conditions="partly cloudy", humidity=65 ) mcp.run() ''' ) def main() -> None: # Configure your inference model # model = InferenceClientModel() model = LiteLLMModel( model_id="mistral/mistral-small-latest", # model_id="openai/gpt-4o-mini", ) # Start the Weather MCP server from an inline script in this same file serverparams = StdioServerParameters(command="python", args=["-c", weather_server_script()]) # Bridge MCP tools into SmolAgents with structured outputs enabled with MCPClient( serverparams, structured_output=True, ) as tools: agent = CodeAgent(tools=tools, model=model) # Example query that encourages tool use and unit conversion agent.run("What is the temperature in Tokyo in Fahrenheit?") if __name__ == "__main__": main()
smolagents/examples/structured_output_tool.py/0
{ "file_path": "smolagents/examples/structured_output_tool.py", "repo_id": "smolagents", "token_count": 934 }
280
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import PIL.Image import pytest from smolagents.agent_types import _AGENT_TYPE_MAPPING from smolagents.default_tools import FinalAnswerTool from .test_tools import ToolTesterMixin from .utils.markers import require_torch class TestFinalAnswerTool(ToolTesterMixin): def setup_method(self): self.inputs = {"answer": "Final answer"} self.tool = FinalAnswerTool() def test_exact_match_arg(self): result = self.tool("Final answer") assert result == "Final answer" def test_exact_match_kwarg(self): result = self.tool(answer=self.inputs["answer"]) assert result == "Final answer" @require_torch def test_agent_type_output(self, inputs): for input_type, input in inputs.items(): output = self.tool(**input, sanitize_inputs_outputs=True) agent_type = _AGENT_TYPE_MAPPING[input_type] assert isinstance(output, agent_type) @pytest.fixture def inputs(self, shared_datadir): import torch return { "string": {"answer": "Text input"}, "image": {"answer": PIL.Image.open(shared_datadir / "000000039769.png").resize((512, 512))}, "audio": {"answer": torch.Tensor(np.ones(3000))}, }
smolagents/tests/test_final_answer.py/0
{ "file_path": "smolagents/tests/test_final_answer.py", "repo_id": "smolagents", "token_count": 680 }
281
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Markers for tests .""" import os from importlib.util import find_spec import pytest require_run_all = pytest.mark.skipif(not os.getenv("RUN_ALL"), reason="requires RUN_ALL environment variable") require_soundfile = pytest.mark.skipif(find_spec("soundfile") is None, reason="requires soundfile") require_torch = pytest.mark.skipif(find_spec("torch") is None, reason="requires torch")
smolagents/tests/utils/markers.py/0
{ "file_path": "smolagents/tests/utils/markers.py", "repo_id": "smolagents", "token_count": 280 }
282
ARG cuda_arch_list="75-real;80-real;86-real;89-real;90-real;100-real;120-real" ARG cuda_base=12.8.0 ARG build_type=release ARG ompi_version=4.1.7 ARG sccache_gha_enabled=off ARG actions_results_url="" ARG actions_runtime_token="" # CUDA dependent dependencies resolver stage FROM nvidia/cuda:${cuda_base}-cudnn-devel-ubuntu24.04 AS cuda-builder RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ build-essential \ cmake \ curl \ gcc-14 \ g++-14 \ git \ git-lfs \ lld \ libssl-dev \ libucx-dev \ libasan8 \ libubsan1 \ ninja-build \ pkg-config \ pipx \ python3 \ python3-dev \ python3-setuptools \ tar \ wget --no-install-recommends && \ pipx ensurepath ENV TGI_INSTALL_PREFIX=/usr/local/tgi ENV TENSORRT_INSTALL_PREFIX=/usr/local/tensorrt # Install OpenMPI FROM cuda-builder AS mpi-builder WORKDIR /opt/src/mpi ARG ompi_version ENV OMPI_VERSION=${ompi_version} ENV OMPI_TARBALL_FILENAME=openmpi-${OMPI_VERSION}.tar.bz2 ADD --checksum=sha256:54a33cb7ad81ff0976f15a6cc8003c3922f0f3d8ceed14e1813ef3603f22cd34 \ https://download.open-mpi.org/release/open-mpi/v4.1/${OMPI_TARBALL_FILENAME} . RUN tar --strip-components=1 -xf ${OMPI_TARBALL_FILENAME} &&\ ./configure --prefix=/usr/local/mpi --with-cuda=/usr/local/cuda --with-slurm && \ make -j all && \ make install && \ rm -rf ${OMPI_TARBALL_FILENAME}/.. # Install TensorRT FROM cuda-builder AS trt-builder COPY backends/trtllm/scripts/install_tensorrt.sh /opt/install_tensorrt.sh RUN chmod +x /opt/install_tensorrt.sh && \ /opt/install_tensorrt.sh # Build Backend FROM cuda-builder AS tgi-builder WORKDIR /usr/src/text-generation-inference # Scoped global args reuse ARG cuda_arch_list ARG build_type ARG sccache_gha_enabled ARG actions_results_url ARG actions_runtime_token # Install Rust ENV PATH="/root/.cargo/bin:$PATH" RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain 1.85.1 --profile minimal -y && \ chmod -R a+w /root/.rustup && \ chmod -R a+w /root/.cargo && \ cargo install sccache --version ">=0.10.0" --locked ENV LD_LIBRARY_PATH="/usr/local/mpi/lib:$LD_LIBRARY_PATH" ENV PKG_CONFIG_PATH="/usr/local/mpi/lib/pkgconfig" ENV CMAKE_PREFIX_PATH="/usr/local/mpi:/usr/local/tensorrt" ENV USE_LLD_LINKER=ON ENV CUDA_ARCH_LIST=${cuda_arch_list} # SCCACHE Specifics args - before finding a better, more generic, way... ENV SCCACHE_GHA_ENABLED=${sccache_gha_enabled} ENV ACTIONS_RESULTS_URL=${actions_results_url} ENV ACTIONS_RUNTIME_TOKEN=${actions_runtime_token} COPY Cargo.lock Cargo.lock COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY router router COPY backends backends COPY benchmark benchmark COPY launcher launcher COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi ENV RUSTC_WRAPPER=sccache ENV CMAKE_INSTALL_PREFIX=$TGI_INSTALL_PREFIX RUN export CC=gcc-14 \ export CXX=g++-14 \ export CMAKE_C_COMPILER_LAUNCHER=sccache && \ export CMAKE_CXX_COMPILER_LAUNCHER=sccache && \ export CMAKE_CUDA_COMPILER_LAUNCHER=sccache && \ mkdir $TGI_INSTALL_PREFIX && mkdir "$TGI_INSTALL_PREFIX/include" && mkdir "$TGI_INSTALL_PREFIX/lib" && \ cargo build --profile ${build_type} --package text-generation-backends-trtllm --bin text-generation-backends-trtllm && \ sccache --show-stats FROM nvidia/cuda:${cuda_base}-cudnn-runtime-ubuntu24.04 AS runtime RUN apt update && apt install -y libucx0 pipx python3-minimal python3-dev python3-pip python3-venv && \ rm -rf /var/lib/{apt,dpkg,cache,log}/ && \ pipx ensurepath && \ pipx install --include-deps transformers tokenizers WORKDIR /usr/local/tgi/bin ENV PATH=/root/.local/share/pipx/venvs/transformers/bin/:$PATH ENV LD_LIBRARY_PATH="/usr/local/tgi/lib:/usr/local/mpi/lib:/usr/local/tensorrt/lib:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH" ENV TOKENIZERS_PARALLELISM=false ENV OMPI_MCA_plm_rsh_agent="" COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt COPY --from=tgi-builder /usr/local/tgi /usr/local/tgi COPY --from=tgi-builder /usr/src/text-generation-inference/target/release/text-generation-backends-trtllm /usr/local/tgi/bin/text-generation-launcher # This is used only for the CI/CD FROM nvidia/cuda:${cuda_base}-cudnn-runtime-ubuntu24.04 AS ci-runtime RUN apt update && apt install -y libasan8 libubsan1 libucx0 pipx python3-minimal python3-dev python3-pip python3-venv && \ rm -rf /var/lib/{apt,dpkg,cache,log}/ && \ pipx ensurepath && \ pipx install --include-deps transformers tokenizers WORKDIR /usr/local/tgi/bin ENV PATH=/root/.local/share/pipx/venvs/transformers/bin/:$PATH ENV LD_LIBRARY_PATH="/usr/local/tgi/lib:/usr/local/mpi/lib:/usr/local/tensorrt/lib:/usr/local/cuda/lib64/stubs:$LD_LIBRARY_PATH" ENV TOKENIZERS_PARALLELISM=false ENV OMPI_MCA_plm_rsh_agent="" COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt COPY --from=tgi-builder /usr/local/tgi /usr/local/tgi # Basically we copy from target/debug instead of target/release COPY --from=tgi-builder /usr/src/text-generation-inference/target/debug/text-generation-backends-trtllm /usr/local/tgi/bin/text-generation-launcher # This is the final image FROM runtime LABEL co.huggingface.vendor="Hugging Face Inc." LABEL org.opencontainers.image.authors="hardware@hf.co" LABEL org.opencontainers.title="Text-Generation-Inference TensorRT-LLM Backend" ENTRYPOINT ["./text-generation-launcher"] CMD ["--executor-worker", "/usr/local/tgi/bin/executorWorker"]
text-generation-inference/Dockerfile_trtllm/0
{ "file_path": "text-generation-inference/Dockerfile_trtllm", "repo_id": "text-generation-inference", "token_count": 2436 }
283
from typing import TYPE_CHECKING, Optional, List import torch import torch.distributed from torch import nn from torch.distributed import ProcessGroup from text_generation_server.utils.sgmv import ( add_lora_a_bgmv, add_lora_b_bgmv, has_sgmv, lora_a_sgmv_cutlass, lora_b_sgmv_cutlass, orient_for_rank, ) if TYPE_CHECKING: from text_generation_server.adapters import AdapterBatchData from text_generation_server.adapters.lora import BatchLoraWeights class LoraLinear(nn.Module): def __init__( self, base_layer: nn.Module, layer_id: int, process_group: ProcessGroup ): super().__init__() self.base_layer = base_layer self.layer_id = layer_id self.process_group = process_group def forward_layer_type( self, result: torch.Tensor, input: torch.Tensor, adapter_data: "AdapterBatchData", layer_type: str, start_idx: int, end_idx: int, ) -> torch.Tensor: if adapter_data is None: return result data: Optional["BatchLoraWeights"] = adapter_data.data.get(layer_type) if has_sgmv() and data is not None and data.can_vectorize(self.process_group): # In tensor-parallel configurations, each GPU processes a specific segment of the output. # The 'result' tensor represents the full output, which can vary in size based on # the layer type (e.g., attention vs. feed-forward layers). We define the current # segment using start_idx and end_idx. If the segment size doesn't match this GPU's # slice of 'result', we create a zero tensor of the correct size for LoRA computation. # This approach ensures accurate LoRA application across various layer sizes and # configurations, adapting to different model architectures and parallelization strategies. # # Example scenarios where this is necessary: # 1. The adapter's size doesn't evenly divide across GPUs. # 2. We're processing the last segment which might be smaller. # 3. Different projection layers (q, k, v) have different sizes. if end_idx - start_idx != result.shape[1]: proj = torch.zeros_like(result[:, start_idx:end_idx]) else: proj = result for r, rank_segments in data.rank_data.items(): lora_a_ptr = rank_segments.lora_a_ptr lora_b_ptr = rank_segments.lora_b_ptr if lora_a_ptr is None or lora_b_ptr is None: raise ValueError("LoRA data is missing") if data.use_sgmv: # Use SGMV for prefill v = lora_a_sgmv_cutlass( input, rank_segments.tmp_shrink, lora_a_ptr, rank_segments.segment_starts, rank_segments.segment_ends, self.layer_id, r, ) if self.process_group.size() > 1: v = self.collect_lora_a(v) lora_b_sgmv_cutlass( proj, v, rank_segments.tmp_expand, lora_b_ptr, rank_segments.segment_starts, rank_segments.segment_ends, self.layer_id, ) else: # Use BGMV for decode v = torch.zeros( (input.size(0), r), dtype=input.dtype, device=input.device ) # TODO: error with [-1, 0], but not [0, -1] add_lora_a_bgmv( v, input, lora_a_ptr, rank_segments.indices, self.layer_id, ) if self.process_group.size() > 1: v = self.collect_lora_a(v) add_lora_b_bgmv( proj, v, lora_b_ptr, rank_segments.indices, self.layer_id, ) if end_idx - start_idx != result.shape[1]: result[:, start_idx:end_idx] += proj else: for adapter_index in adapter_data.meta.adapter_set: if data is not None and data.has_adapter(adapter_index): adapter_mask = ( (adapter_data.meta.adapter_indices == adapter_index) .to(input.dtype) .view(-1, 1) ) layer_result = self.forward_lora( input, data, adapter_index, adapter_mask ) result[:, start_idx:end_idx] += layer_result return result def forward_lora( self, input: torch.Tensor, data: "BatchLoraWeights", adapter_index: int, adapter_mask: torch.Tensor, ) -> torch.Tensor: lora_a = data.lora_a[adapter_index][self.layer_id, :, :] lora_b = data.lora_b[adapter_index][self.layer_id, :, :] lora_a = orient_for_rank(lora_a, lora_b.size(0)) a_out = input @ lora_a if self.process_group.size() > 1: a_out = self.collect_lora_a(a_out) result = (a_out @ lora_b) * adapter_mask return result def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor: raise NotImplementedError("Implemented in subclasses") class TensorParallelMultiAdapterLinear(LoraLinear): def __init__( self, base_layer: nn.Module, layer_id: int, layer_names: List[str], sizes: List[int], process_group: ProcessGroup, ): super().__init__(base_layer, layer_id, process_group) self.layer_names = layer_names self.sizes = sizes @classmethod def load( cls, base_layer: nn.Module, layer_id: int, layer_names: List[str], sizes: List[int], process_group: ProcessGroup, ): return TensorParallelMultiAdapterLinear( base_layer, layer_id, layer_names, sizes, process_group ) def forward( self, input: torch.Tensor, adapter_data: "AdapterBatchData" ) -> torch.Tensor: result = self.base_layer(input) # noop if no layer names are provided (e.g. for models without adapters) if self.layer_names is None: return result # handle models like Bloom that have inputs of shape # (batch_size, sequence_length, hidden_size) # we need to reshape them to (batch_size * sequence_length, hidden_size) # for the LoRA computation, then reshape back prev_shape = result.shape is_3d = len(input.shape) >= 3 if is_3d: input = input.reshape(-1, input.shape[-1]) result = result.reshape(-1, result.shape[-1]) offset = 0 for i, layer_name in enumerate(self.layer_names): start_idx = offset // self.process_group.size() # The 'sizes' parameter is essential in tensor-parallel setups for handling multiple # projection layers (q_proj, k_proj, v_proj) by defining their output dimensions. It # ensures correct slicing of the result tensor, accommodating variations like grouped-query # attention where k_proj and v_proj differ from q_proj. This allows precise application of # LoRA adapters to each sub-component of the multi-head attention mechanism, managing the # different projection sizes across layers and model architectures. if self.sizes is not None: offset += self.sizes[i] end_idx = offset // self.process_group.size() else: end_idx = result.shape[1] result = self.forward_layer_type( result, input, adapter_data, layer_name, start_idx, end_idx ) if is_3d: result = result.reshape(prev_shape) return result def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor: # Tensor parallel implementation of X @ A@B, where A and B are sharded column-wise. # We use an all-gather between X@A and (X@A)@B to ensure alignment across ranks. # # TODO(travis): this is not very efficient as we do an all-gather for every adapter, # instead we could pre-allocate a (B, a, r) tensor for all adapters with the same # rank, compute `a_out` on each, and then slice them into the buffer as shown here: # https://discuss.pytorch.org/t/concatenate-tensors-without-memory-copying/34609 gathered_tensors = [ torch.empty_like(a_out) for _ in range(self.process_group.size()) ] torch.distributed.all_gather(gathered_tensors, a_out) return torch.cat(gathered_tensors, dim=1) class TensorParallelAdapterRowLinear(LoraLinear): def __init__(self, base_layer, layer_id, layer_name, process_group): super().__init__(base_layer, layer_id, process_group) self.layer_name = layer_name @classmethod def load(cls, base_layer, layer_id, layer_name, process_group): return cls(base_layer, layer_id, layer_name, process_group) def forward( self, input: torch.Tensor, adapter_data: "AdapterBatchData" ) -> torch.Tensor: result = self.base_layer(input) if self.layer_name is None: return result # Fused all-gather + all-reduce from S-LoRA paper: https://arxiv.org/abs/2311.03285 stride = result.shape[-1] // self.process_group.size() start_idx = self.process_group.rank() * stride end_idx = (self.process_group.rank() + 1) * stride self.forward_layer_type( result, input, adapter_data, self.layer_name, start_idx, end_idx ) return result def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor: # Tensor parallel implementation of X @ A@B, where A and B are sharded row-wise. # We use an all-reduce between X@A and (X@A)@B to ensure alignment across ranks. # # TODO(travis): this is not very efficient as we do an all-reduce for every adapter, # instead we could pre-allocate a (B, a, r) tensor for all adapters with the same # rank, compute `a_out` on each, and then slice them into the buffer as shown here: # https://discuss.pytorch.org/t/concatenate-tensors-without-memory-copying/34609 torch.distributed.all_reduce(a_out, group=self.process_group) return a_out
text-generation-inference/backends/gaudi/server/text_generation_server/layers/lora.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/lora.py", "repo_id": "text-generation-inference", "token_count": 5398 }
284
# coding=utf-8 # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Phi-MoE model.""" from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging logger = logging.get_logger(__name__) PHIMOE_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/Phi-3.5-MoE-instruct": "https://huggingface.co/microsoft/Phi-3.5-MoE-instruct/resolve/main/config.json", } class PhiMoEConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`PhiMoEModel`]. It is used to instantiate a Phi-MoE model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [microsoft/Phi-3.5-MoE-instruct](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32064): Vocabulary size of the PhiMoE model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`PhiMoEModel`] hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 6400): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer encoder. num_key_value_heads (`int`, *optional*, defaults to 8): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to `4096*32`): The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention allows sequence of up to 4096*32 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. pad_token_id (`int`, *optional*): The id of the padding token. bos_token_id (`int`, *optional*, defaults to 1): The id of the "beginning-of-sequence" token. eos_token_id (`int`, *optional*, defaults to 2): The id of the "end-of-sequence" token. tie_word_embeddings (`bool`, *optional*, defaults to `False`): Whether the model's input and output word embeddings should be tied. rope_theta (`float`, *optional*, defaults to 10000.0): The base period of the RoPE embeddings. rope_scaling (`dict`, *optional*): The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must contain the following keys: `type`, `short_factor`, `long_factor`, `short_mscale`, `long_mscale` and `original_max_position_embeddings`. The `type` must be `longrope`, the `short_mscale` and `long_scale` must be numbers, the `short_factor` and `long_factor` must be lists of numbers with the same length as half of the attention head size and the `original_max_position_embeddings` must be an integer. sliding_window (`int`, *optional*): Sliding window attention window size. If not specified, will default to `262144`. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. num_experts_per_tok (`int`, *optional*, defaults to 2): The number of experts to root per-token, can be also interpreted as the `top-p` routing parameter num_local_experts (`int`, *optional*, defaults to 16): Number of experts per Sparse MLP layer. output_router_logits (`bool`, *optional*, defaults to `False`): Whether or not the router logits should be returned by the model. Enabeling this will also allow the model to output the auxiliary loss. See [here]() for more details router_aux_loss_coef (`float`, *optional*, defaults to 0.0): The aux loss factor for the total loss. router_jitter_noise (`float`, *optional*, defaults to 0.01): Amount of noise to add to the router. ```python >>> from transformers import PhiMoEModel, PhiMoEConfig >>> # Initializing a Phi-3 style configuration >>> configuration = PhiMoEConfig.from_pretrained("microsoft/Phi-3.5-MoE-instruct") >>> # Initializing a model from the configuration >>> model = PhiMoEModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "phimoe" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=32064, hidden_size=4096, intermediate_size=6400, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act="silu", max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-5, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=1e6, rope_scaling=None, sliding_window=None, attention_dropout=0.0, num_experts_per_tok=2, num_local_experts=16, output_router_logits=False, router_aux_loss_coef=0.001, router_jitter_noise=0.01, input_jitter_noise=0.0, attention_bias=False, lm_head_bias=False, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window self.attention_bias = attention_bias self.lm_head_bias = lm_head_bias # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.num_experts_per_tok = num_experts_per_tok self.num_local_experts = num_local_experts self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef self.router_jitter_noise = router_jitter_noise self.input_jitter_noise = input_jitter_noise self.rope_scaling = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) def _rope_scaling_validation(self): """ Validate the `rope_scaling` configuration. """ if self.rope_scaling is None: return if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 6: raise ValueError( "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor`, `long_factor`, " f"`short_mscale`, `long_mscale` and `original_max_position_embeddings`, got {self.rope_scaling}" ) rope_scaling_type = self.rope_scaling.get("type", None) rope_scaling_short_factor = self.rope_scaling.get("short_factor", None) rope_scaling_long_factor = self.rope_scaling.get("long_factor", None) rope_scaling_short_mscale = self.rope_scaling.get("short_mscale", None) rope_scaling_long_mscale = self.rope_scaling.get("long_mscale", None) original_max_position_embeddings = self.rope_scaling.get( "original_max_position_embeddings", None ) if rope_scaling_type is None or rope_scaling_type not in ["longrope"]: raise ValueError( f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}" ) if not ( isinstance(rope_scaling_short_factor, list) and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor) ): raise ValueError( f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}" ) if ( not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2 ): raise ValueError( f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}" ) if not ( isinstance(rope_scaling_long_factor, list) and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor) ): raise ValueError( f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}" ) if ( not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2 ): raise ValueError( f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}" ) if not isinstance(rope_scaling_short_mscale, (int, float)): raise ValueError( f"`rope_scaling`'s short_mscale field must be a number, got {rope_scaling_short_mscale}" ) if not isinstance(rope_scaling_long_mscale, (int, float)): raise ValueError( f"`rope_scaling`'s long_mscale field must be a number, got {rope_scaling_long_mscale}" ) if not isinstance(original_max_position_embeddings, int): raise ValueError( f"`rope_scaling`'s original_max_position_embeddings field must be an integer, got {original_max_position_embeddings}" )
text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_moe_modeling.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_phi_moe_modeling.py", "repo_id": "text-generation-inference", "token_count": 5177 }
285
import os from typing import Dict, Optional from loguru import logger from text_generation_server.utils.log import log_master REQUEST_LOGPROBS = os.getenv("REQUEST_LOGPROBS", "0").lower() in {"1", "true"} ATTENTION = os.getenv("ATTENTION", "paged") # default_prefix_caching = "1" if ATTENTION in {"flashinfer", "flashdecoding"} else "0" PREFIX_CACHING = os.getenv("PREFIX_CACHING", "0").lower() in { "1", "true", } log_master(logger.info, f"Using prefix caching = {PREFIX_CACHING}") _expected = {"paged"} assert ( ATTENTION in _expected ), f"Attention is not valid {ATTENTION}, expected {_expected}" log_master(logger.info, f"Using Attention = {ATTENTION}") TGI_WIGGLE_ROOM = float(os.getenv("TGI_WIGGLE_ROOM", "0.90")) assert TGI_WIGGLE_ROOM > 0 assert TGI_WIGGLE_ROOM < 1 # This is overridden by the cli BLOCK_SIZE: int BLOCK_SIZE = 128 # This is overridden at model loading. global MODEL_ID MODEL_ID = None def set_model_id(model_id: str): global MODEL_ID MODEL_ID = model_id # NOTE: eventually we should move this into the router and pass back the # index in all cases. ADAPTER_TO_INDEX: Optional[Dict[str, int]] = None def set_adapter_to_index(adapter_to_index: Dict[str, int]): global ADAPTER_TO_INDEX ADAPTER_TO_INDEX = adapter_to_index def get_adapter_to_index(): global ADAPTER_TO_INDEX return ADAPTER_TO_INDEX
text-generation-inference/backends/gaudi/server/text_generation_server/models/globals.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/globals.py", "repo_id": "text-generation-inference", "token_count": 526 }
286
[package] name = "grpc-metadata" version = "0.1.0" edition = "2021" [dependencies] opentelemetry = "^0.20" tonic = "^0.10" tracing = "^0.1" tracing-opentelemetry = "^0.21"
text-generation-inference/backends/grpc-metadata/Cargo.toml/0
{ "file_path": "text-generation-inference/backends/grpc-metadata/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 83 }
287
[build-system] requires = ["setuptools>=78.1"] build-backend = "setuptools.build_meta" [project] name = "text-generation-server" version = "VERSION" authors = [{name="David Corvoysier", email="david@huggingface.co" }] description = "TGI compatible inference server for AWS Neuronx platforms" dependencies = [ 'protobuf > 3.20.1, < 4', 'grpcio == 1.57.0', 'grpcio-status == 1.48.2', 'grpcio-reflection == 1.48.2', 'grpc-interceptor == 0.15.2', 'typer == 0.6.1', 'safetensors', 'loguru == 0.6.0', 'optimum-neuron[neuronx] >= 0.0.28', ] [tool.setuptools] packages = ["text_generation_server", "text_generation_server.pb"] [project.scripts] text-generation-server = 'text_generation_server.cli:app'
text-generation-inference/backends/neuron/server/pyproject.toml/0
{ "file_path": "text-generation-inference/backends/neuron/server/pyproject.toml", "repo_id": "text-generation-inference", "token_count": 303 }
288
import pytest import torch from text_generation_server.generator import Slot from text_generation_server.pb.generate_pb2 import Request from transformers import AutoTokenizer, GenerationConfig TOKENIZERS = ["NousResearch/Llama-2-7b-hf", "gpt2"] @pytest.fixture(params=TOKENIZERS) def tokenizer(request): t = AutoTokenizer.from_pretrained(request.param) t.padding_side = "left" t.pad_token_id = t.eos_token_id return t @pytest.mark.parametrize( "input_text, generated_text", [ [ "It was a bright cold day in April, and the clocks were striking thirteen.", " Winston Smith, his chin nuzzled into his breast in an effort to escape the vile wind," " slipped quickly through the glass doors of Victory Mansions, though not quickly enough" " to prevent a swirl of gritty dust from entering along with him.", ], ["This sentence is written in chinese:", "我很感谢你的热情"], ["Some text might contain a lot of emojis like 😃", "😍💪 👉 👀"], ], ids=["spaces", "chinese-utf8", "emojis"], ) def test_decode_streaming(tokenizer, input_text, generated_text): slot = Slot(0, tokenizer) request = Request(id=0, inputs=input_text) slot.assign(0, request, GenerationConfig()) assert slot.cached_text == input_text inputs = tokenizer( input_text, padding="max_length", max_length=len(input_text) + 1, return_tensors="pt", ) input_ids = inputs["input_ids"][0] attention_mask = inputs["attention_mask"][0] generated_tokens = tokenizer(generated_text, add_special_tokens=False)["input_ids"] # We need to regenerate the full text as the tokenizer might change it (extra spaces might be added) all_input_ids = torch.cat([input_ids, torch.tensor(generated_tokens)]) full_text = tokenizer.decode(all_input_ids, skip_special_tokens=True) regenerated_text = full_text[len(input_text) :] # Initialize the slot with the inputs slot.reset(input_ids, attention_mask, selector=None) assert slot.generated_tokens == 0 # Simulate an iterative generation (i.e. don't call select and use known tokens instead) decoded_text = "" for i in range(len(generated_tokens)): text = slot.append(generated_tokens[i]) assert slot.generated_tokens == i + 1 decoded_text += text assert decoded_text == regenerated_text
text-generation-inference/backends/neuron/tests/server/test_generator_slot.py/0
{ "file_path": "text-generation-inference/backends/neuron/tests/server/test_generator_slot.py", "repo_id": "text-generation-inference", "token_count": 928 }
289
#ifndef TGI_BACKEND_TRTLLM_FFI #define TGI_BACKEND_TRTLLM_FFI #include <memory> #include <thread> #include <nvml.h> #include <tensorrt_llm/common/tllmException.h> #include <tensorrt_llm/plugins/api/tllmPlugin.h> #include <spdlog/spdlog.h> #include <backend.hpp> #include <hardware.hpp> namespace rust::behavior { template<typename Try, typename Fail> static void trycatch(Try &&func, Fail &&fail) noexcept try { func(); } catch (tensorrt_llm::common::TllmException &e) { fail(e.what()); } } namespace huggingface::tgi::backends::trtllm { class tensorrt_llm_backend_t; } #include "backends/trtllm/src/lib.rs.h" namespace huggingface::tgi::backends::trtllm { std::once_flag backend_initialized_flag; constexpr finish_reason_t as_finish_reason_t(const tle::FinishReason reason) noexcept { switch (reason) { case tle::FinishReason::kNOT_FINISHED: return finish_reason_t::kNOT_FINISHED; case tle::FinishReason::kSTOP_WORDS: return finish_reason_t::kSTOP_WORDS; case tle::FinishReason::kEND_ID: return finish_reason_t::kEND_ID; case tle::FinishReason::kLENGTH: return finish_reason_t::kLENGTH; default: std::unreachable(); } } static auto as_generation_step = [](const tle::Response &r) { const auto reqId = r.getRequestId(); if (!r.hasError()) [[likely]] { const auto result = r.getResult(); const auto logits = result.logProbs.value()[0]; return generation_step_t{ reqId, static_cast<uint32_t>(result.outputTokenIds[0][0]), logits.back(), result.isFinal, as_finish_reason_t(result.finishReasons[0]), false, std::string() }; } else { return generation_step_t{ reqId, 0, 0.0, true, finish_reason_t::kNOT_FINISHED, true, std::move(r.getErrorMsg()) }; } }; class tensorrt_llm_backend_t { private: backend_t inner_; public: tensorrt_llm_backend_t(std::filesystem::path &&engine_folder, std::filesystem::path &&executor_worker_path) : inner_(engine_folder, executor_worker_path) {} size_t num_tokens_ready() const noexcept { return inner_.num_tokens_ready(); } request_id_t submit( rust::Slice<const uint32_t> tokens, uint32_t max_new_tokens, uint32_t top_k, float_t top_p, float_t temperature, float_t repetition_penalty, float_t frequency_penalty, uint64_t seed ) { // This is enabled only if using add_compile_definitions(SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE) SPDLOG_TRACE(FMT_STRING("[FFI] Submitting {:d} prompt tokens to the executor")); // Submit the request to the executor and get back a potential request_id used to track request status const auto signed_tokens = std::vector<int32_t>(tokens.begin(), tokens.end()); const auto maybe_request_id = inner_.submit( signed_tokens, {max_new_tokens}, {top_k, top_p, repetition_penalty, frequency_penalty, temperature, seed} ); // If we do have a value, let's return the request_id if (maybe_request_id.has_value()) [[likely]] { return *maybe_request_id; } else { SPDLOG_WARN("[FFI] Failed to submit request to the executor"); return maybe_request_id.error(); } } std::unique_ptr<std::vector<generation_step_t>> pull_tokens() noexcept { if (num_tokens_ready() > 0) [[likely]] { const auto responses = inner_.pull_tokens(); SPDLOG_TRACE("[FFI] Successfully pulled out {:d} responses from executor", responses.size()); // Transform tle::Response to generation_step_t #ifdef __cpp_lib_ranges_to_container auto steps = responses | std::views::transform(as_generation_step) | std::ranges::to<std::vector>(); #else auto steps = std::vector<generation_step_t>(); steps.reserve(responses.size()); std::transform(responses.begin(), responses.end(), std::back_inserter(steps), as_generation_step); #endif return std::make_unique<std::vector<generation_step_t>>(steps); } else { return std::make_unique<std::vector<generation_step_t>>(); } } void cancel(request_id_t request_id) noexcept { SPDLOG_DEBUG("[FFI] cancelling request {:d}", request_id); inner_.cancel(request_id); } }; void initialize_logging() { #ifndef TGI_TRTLLM_BACKEND_DEBUG if (const auto TRTLLM_LOG_LEVEL_CSTR = std::getenv("TRTLLM_LOG_LEVEL")) { std::string log_level(TRTLLM_LOG_LEVEL_CSTR); std::transform(log_level.begin(), log_level.end(), log_level.begin(), [](unsigned char c) { return std::tolower(c); }); if (log_level == "debug") spdlog::set_level(spdlog::level::debug); else spdlog::set_level(spdlog::level::info); } #else spdlog::set_level(spdlog::level::debug); #endif } void initialize_tensorrt_llm_backend() { SPDLOG_INFO("Initializing TGI - TensoRT-LLM Backend (v{})", tle::version()); // Initialize everyone initialize_logging(); nvmlInit_v2(); initTrtLlmPlugins(); const auto numGpus = huggingface::tgi::hardware::cuda::get_device_count(); if (numGpus.has_value()) { SPDLOG_INFO("[FFI] Detected {:d} Nvidia GPU(s)", *numGpus); } else { SPDLOG_WARN("[FFI] Failed to detected Nvidia GPU(s) on the system"); // todo: throw } } std::unique_ptr<tensorrt_llm_backend_t> create_backend_from_engine_folder(const rust::Str engines_folder, const rust::Str executor_worker_path) { std::call_once(backend_initialized_flag, initialize_tensorrt_llm_backend); return std::make_unique<tensorrt_llm_backend_t>( std::filesystem::path(std::string_view(engines_folder.begin(), engines_folder.end()), std::filesystem::path::format::auto_format), std::filesystem::path(std::string_view(executor_worker_path.begin(), executor_worker_path.end()), std::filesystem::path::format::auto_format) ); } } #endif
text-generation-inference/backends/trtllm/csrc/ffi.hpp/0
{ "file_path": "text-generation-inference/backends/trtllm/csrc/ffi.hpp", "repo_id": "text-generation-inference", "token_count": 3501 }
290
[package] name = "text-generation-benchmark" description = "Text Generation Benchmarking tool" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [lib] path = "src/lib.rs" [[bin]] name = "text-generation-benchmark" path = "src/main.rs" [dependencies] average = "0.14" clap = { version = "4.4.5", features = ["derive", "env"] } float-ord = "0.3.2" serde = {version = "1.0.188", features = ["derive"]} serde_json = "1.0" tabled = "0.14.0" text-generation-client = { path = "../backends/client" } thiserror = "1.0.48" tokenizers = { workspace = true } tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync", "macros"] } ratatui = "0.28.1" tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } hf-hub = { workspace = true }
text-generation-inference/benchmark/Cargo.toml/0
{ "file_path": "text-generation-inference/benchmark/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 335 }
291
from text_generation.errors import ( parse_error, GenerationError, IncompleteGenerationError, OverloadedError, ValidationError, BadRequestError, ShardNotReadyError, ShardTimeoutError, NotFoundError, RateLimitExceededError, UnknownError, ) def test_generation_error(): payload = {"error_type": "generation", "error": "test"} assert isinstance(parse_error(400, payload), GenerationError) def test_incomplete_generation_error(): payload = {"error_type": "incomplete_generation", "error": "test"} assert isinstance(parse_error(400, payload), IncompleteGenerationError) def test_overloaded_error(): payload = {"error_type": "overloaded", "error": "test"} assert isinstance(parse_error(400, payload), OverloadedError) def test_validation_error(): payload = {"error_type": "validation", "error": "test"} assert isinstance(parse_error(400, payload), ValidationError) def test_bad_request_error(): payload = {"error": "test"} assert isinstance(parse_error(400, payload), BadRequestError) def test_shard_not_ready_error(): payload = {"error": "test"} assert isinstance(parse_error(403, payload), ShardNotReadyError) assert isinstance(parse_error(424, payload), ShardNotReadyError) def test_shard_timeout_error(): payload = {"error": "test"} assert isinstance(parse_error(504, payload), ShardTimeoutError) def test_not_found_error(): payload = {"error": "test"} assert isinstance(parse_error(404, payload), NotFoundError) def test_rate_limit_exceeded_error(): payload = {"error": "test"} assert isinstance(parse_error(429, payload), RateLimitExceededError) def test_unknown_error(): payload = {"error": "test"} assert isinstance(parse_error(500, payload), UnknownError)
text-generation-inference/clients/python/tests/test_errors.py/0
{ "file_path": "text-generation-inference/clients/python/tests/test_errors.py", "repo_id": "text-generation-inference", "token_count": 598 }
292
# Neuron backend for AWS Trainium and Inferentia The Neuron backend allows the deployment of TGI on AWS Trainium and Inferentia family of chips. The following hardware targets are supported: - Trainium 1, - Inferentia 2. ## Features The basic TGI features are supported: - continuous batching, - token streaming, - greedy search and multinomial sampling using [transformers](https://huggingface.co/docs/transformers/generation_strategies#customize-text-generation). ## Deploy the service from the Hugging Face hub The simplest way to deploy the NeuronX TGI service for a specific model is to follow the deployment instructions in the model card: - click on the "Deploy" button on the right, - select your deployment service ("Inference Endpoints" and "SageMaker" are supported), - select "AWS Trainum & Inferentia", - follow the instructions. ## Deploy the service on a dedicated host The service is launched simply by running the text-generation-inference container with two sets of parameters: ``` docker run <system_parameters> ghcr.io/huggingface/text-generation-inference:3.3.4-neuron <service_parameters> ``` - system parameters are used to map ports, volumes and devices between the host and the service, - service parameters are forwarded to the `text-generation-launcher`. When deploying a service, you will need a pre-compiled Neuron model. The Neuron TGI backend supports two main modes of operation: - you can either deploy the service on a model that has already been exported to Neuron, - or alternatively you can take advantage of the Neuron Model Cache to export your own model. ### Common system parameters Whenever you launch a TGI service, we highly recommend you to mount a shared volume mounted as `/data` in the container: this is where the models will be cached to speed up further instantiations of the service. Note also that enough neuron devices should be made visible to the container, knowing that each neuron device has two cores (so when deploying on two cores you need to expose at least one device). The recommended way to expose a device in a production environment is to use explicitly the `--device` option (e.g `--device /dev/neuron0`) repeated as many time as there are devices to be exposed. Note: alternatively, for a quick local test it is also possible to launch the service in `privileged` mode to get access to all neuron devices. Finally, you might want to export the `HF_TOKEN` if you want to access gated repositories. Here is an example of a service instantiation exposing only the first device: ``` docker run -p 8080:80 \ -v $(pwd)/data:/data \ --device=/dev/neuron0 \ -e HF_TOKEN=${HF_TOKEN} \ ghcr.io/huggingface/text-generation-inference:<VERSION>-neuron \ <service_parameters> ``` ### Using a standard model from the 🤗 [HuggingFace Hub](https://huggingface.co/aws-neuron) (recommended) We maintain a Neuron Model Cache of the most popular architecture and deployment parameters under [aws-neuron/optimum-neuron-cache](https://huggingface.co/aws-neuron/optimum-neuron-cache). If you just want to try the service quickly using a model without exporting it to Neuron first, it is thus still possible, pending some conditions: - you must specify the export parameters when launching the service (or use default parameters), - the model configuration must be cached. The snippet below shows how you can deploy a service from a hub standard model: ``` export HF_TOKEN=<YOUR_TOKEN> docker run -p 8080:80 \ -v $(pwd)/data:/data \ --device=/dev/neuron0 \ --device=/dev/neuron1 \ --device=/dev/neuron2 \ --device=/dev/neuron3 \ -e HF_TOKEN=${HF_TOKEN} \ -e HF_AUTO_CAST_TYPE="fp16" \ -e HF_NUM_CORES=8 \ ghcr.io/huggingface/text-generation-inference:<VERSION>-neuron \ --model-id meta-llama/Meta-Llama-3-8B \ --max-batch-size 1 \ --max-input-length 3164 \ --max-total-tokens 4096 ``` ### Using a model exported to a local path Alternatively, you can first [export the model to neuron format](https://huggingface.co/docs/optimum-neuron/main/en/guides/export_model#exporting-neuron-models-using-text-generation-inference) locally. You can then deploy the service inside the shared volume: ``` docker run -p 8080:80 \ -v $(pwd)/data:/data \ --device=/dev/neuron0 \ --device=/dev/neuron1 \ ghcr.io/huggingface/text-generation-inference:<VERSION>-neuron \ --model-id /data/<neuron_model_path> ``` Note: You don't need to specify any service parameters, as they will all be deduced from the model export configuration. You must however expose enough devices to match the number of cores specified during the export phase. ### Using a neuron model from the 🤗 [HuggingFace Hub](https://huggingface.co/) The easiest way to share a neuron model inside your organization is to push it on the Hugging Face hub, so that it can be deployed directly without requiring an export. The snippet below shows how you can deploy a service from a hub neuron model: ``` docker run -p 8080:80 \ -v $(pwd)/data:/data \ --device=/dev/neuron0 \ --device=/dev/neuron1 \ -e HF_TOKEN=${HF_TOKEN} \ ghcr.io/huggingface/text-generation-inference:<VERSION>-neuron \ --model-id <organization>/<neuron-model> ``` ### Choosing service parameters Use the following command to list the available service parameters: ``` docker run ghcr.io/huggingface/text-generation-inference:<VERSION>-neuron --help ``` The configuration of an inference endpoint is always a compromise between throughput and latency: serving more requests in parallel will allow a higher throughput, but it will increase the latency. The neuron models have static input dimensions `[batch_size, max_length]`. This adds several restrictions to the following parameters: - `--max-batch-size` must be set to `batch size`, - `--max-input-length` must be lower than `max_length`, - `--max-total-tokens` must be set to `max_length` (it is per-request). Although not strictly necessary, but important for efficient prefilling: - `--max-batch-prefill-tokens` should be set to `batch_size` * `max-input-length`. ### Choosing the correct batch size As seen in the previous paragraph, neuron model static batch size has a direct influence on the endpoint latency and throughput. Please refer to [text-generation-inference](https://github.com/huggingface/text-generation-inference) for optimization hints. Note that the main constraint is to be able to fit the model for the specified `batch_size` within the total device memory available on your instance (16GB per neuron core, with 2 cores per device). ## Query the service You can query the model using either the `/generate` or `/generate_stream` routes: ``` curl 127.0.0.1:8080/generate \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` ``` curl 127.0.0.1:8080/generate_stream \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` Note: replace 127.0.0.1:8080 with your actual IP address and port.
text-generation-inference/docs/source/backends/neuron.md/0
{ "file_path": "text-generation-inference/docs/source/backends/neuron.md", "repo_id": "text-generation-inference", "token_count": 2226 }
293
# LoRA (Low-Rank Adaptation) ## What is LoRA? LoRA is a technique that allows for efficent fine-tuning a model while only updating a small portion of the model's weights. This is useful when you have a large model that has been pre-trained on a large dataset, but you want to fine-tune it on a smaller dataset or for a specific task. LoRA works by adding a small number of additional weights to the model, which are used to adapt the model to the new dataset or task. These additional weights are learned during the fine-tuning process, while the rest of the model's weights are kept fixed. ## How is it used? LoRA can be used in many ways and the community is always finding new ways to use it. Here are some examples of how you can use LoRA: Technically, LoRA can be used to fine-tune a large language model on a small dataset. However, these use cases can span a wide range of applications, such as: - fine-tuning a language model on a small dataset - fine-tuning a language model on a domain-specific dataset - fine-tuning a language model on a dataset with limited labels ## Optimizing Inference with LoRA LoRA's can be used during inference by mutliplying the adapter weights with the model weights at each specified layer. This process can be computationally expensive, but due to awesome work by [punica-ai](https://github.com/punica-ai/punica) and the [lorax](https://github.com/predibase/lorax) team, optimized kernels/and frameworks have been developed to make this process more efficient. TGI leverages these optimizations in order to provide fast and efficient inference with mulitple LoRA models. ## Serving multiple LoRA adapters with TGI Once a LoRA model has been trained, it can be used to generate text or perform other tasks just like a regular language model. However, because the model has been fine-tuned on a specific dataset, it may perform better on that dataset than a model that has not been fine-tuned. In practice its often useful to have multiple LoRA models, each fine-tuned on a different dataset or for a different task. This allows you to use the model that is best suited for a particular task or dataset. Text Generation Inference (TGI) now supports loading multiple LoRA models at startup that can be used in generation requests. This feature is available starting from version `~2.0.6` and is compatible with LoRA models trained using the `peft` library. ### Specifying LoRA models To use LoRA in TGI, when starting the server, you can specify the list of LoRA models to load using the `LORA_ADAPTERS` environment variable. For example: ```bash LORA_ADAPTERS=predibase/customer_support,predibase/dbpedia ``` To specify model revision, use `adapter_id@revision`, as follows: ```bash LORA_ADAPTERS=predibase/customer_support@main,predibase/dbpedia@rev2 ``` To use a locally stored lora adapter, use `adapter-name=/path/to/adapter`, as seen below. When you want to use this adapter, set `"parameters": {"adapter_id": "adapter-name"}"` ```bash LORA_ADAPTERS=myadapter=/some/path/to/adapter,myadapter2=/another/path/to/adapter ``` note it's possible to mix adapter_ids with adapter_id=adapter_path e.g. ```bash LORA_ADAPTERS=predibase/dbpedia,myadapter=/path/to/dir/ ``` In the server logs, you will see the following message: ```txt Loading adapter weights into model: predibase/customer_support Loading adapter weights into model: predibase/dbpedia ``` ## Generate text You can then use these models in generation requests by specifying the `lora_model` parameter in the request payload. For example: ```json curl 127.0.0.1:3000/generate \ -X POST \ -H 'Content-Type: application/json' \ -d '{ "inputs": "Hello who are you?", "parameters": { "max_new_tokens": 40, "adapter_id": "predibase/customer_support" } }' ``` If you are using a lora adapter stored locally that was set in the following manner: `LORA_ADAPTERS=myadapter=/some/path/to/adapter`, here is an example payload: ```json curl 127.0.0.1:3000/generate \ -X POST \ -H 'Content-Type: application/json' \ -d '{ "inputs": "Hello who are you?", "parameters": { "max_new_tokens": 40, "adapter_id": "myadapter" } }' ``` > **Note:** The Lora feature is new and still being improved. If you encounter any issues or have any feedback, please let us know by opening an issue on the [GitHub repository](https://github.com/huggingface/text-generation-inference/issues/new/choose). Additionally documentation and an improved client library will be published soon. An updated tutorial with detailed examples will be published soon. Stay tuned!
text-generation-inference/docs/source/conceptual/lora.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/lora.md", "repo_id": "text-generation-inference", "token_count": 1339 }
294
# Quick Tour The easiest way of getting started is using the official Docker container. Install Docker following [their installation instructions](https://docs.docker.com/get-docker/). ## Launching TGI Let's say you want to deploy [teknium/OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B) model with TGI on an Nvidia GPU. Here is an example on how to do that: ```bash model=teknium/OpenHermes-2.5-Mistral-7B volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \ ghcr.io/huggingface/text-generation-inference:3.3.4 \ --model-id $model ``` <Tip> If you want to serve gated or private models, please refer to [this guide](https://huggingface.co/docs/text-generation-inference/en/basic_tutorials/gated_model_access) for detailed instructions. </Tip> ### Supported hardware TGI supports various hardware. Make sure to check the [Using TGI with Nvidia GPUs](./installation_nvidia), [Using TGI with AMD GPUs](./installation_amd), [Using TGI with Intel GPUs](./installation_intel), [Using TGI with Gaudi](./installation_gaudi), [Using TGI with Inferentia](./installation_inferentia) guides depending on which hardware you would like to deploy TGI on. ## Consuming TGI Once TGI is running, you can use the `generate` endpoint or the Open AI Chat Completion API compatible [Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api) by doing requests. To learn more about how to query the endpoints, check the [Consuming TGI](./basic_tutorials/consuming_tgi) section, where we show examples with utility libraries and UIs. Below you can see a simple snippet to query the endpoint. <inferencesnippet> <python> ```python import requests headers = { "Content-Type": "application/json", } data = { 'inputs': 'What is Deep Learning?', 'parameters': { 'max_new_tokens': 20, }, } response = requests.post('http://127.0.0.1:8080/generate', headers=headers, json=data) print(response.json()) # {'generated_text': '\n\nDeep Learning is a subset of Machine Learning that is concerned with the development of algorithms that can'} ``` </python> <js> ```js async function query() { const response = await fetch( 'http://127.0.0.1:8080/generate', { method: 'POST', headers: { 'Content-Type': 'application/json'}, body: JSON.stringify({ 'inputs': 'What is Deep Learning?', 'parameters': { 'max_new_tokens': 20 } }) } ); } query().then((response) => { console.log(JSON.stringify(response)); }); /// {"generated_text":"\n\nDeep Learning is a subset of Machine Learning that is concerned with the development of algorithms that can"} ``` </js> <curl> ```curl curl 127.0.0.1:8080/generate \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` </curl> </inferencesnippet> <Tip> To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more. ```bash docker run ghcr.io/huggingface/text-generation-inference:3.3.4 --help ``` </Tip>
text-generation-inference/docs/source/quicktour.md/0
{ "file_path": "text-generation-inference/docs/source/quicktour.md", "repo_id": "text-generation-inference", "token_count": 1206 }
295
import os import json for root, dirs, files in os.walk("."): for filename in files: if filename.endswith(".json"): with open(os.path.join(root, filename), "r") as f: data = json.load(f) print(os.path.join(root, filename)) try: if filename.endswith("_load.json"): for i in range(len(data)): data[i]["details"]["prefill"] = [] else: data["details"]["prefill"] = [] except Exception: pass with open(os.path.join(root, filename), "w") as f: json.dump(data, f, indent=2, ensure_ascii=False)
text-generation-inference/integration-tests/models/__snapshots__/test.py/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test.py", "repo_id": "text-generation-inference", "token_count": 388 }
296
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9306641, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4550781, "special": false, "text": "What" }, { "id": 338, "logprob": -0.5732422, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5761719, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5888672, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.026504517, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4287109, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15856934, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62646484, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json", "repo_id": "text-generation-inference", "token_count": 868 }
297
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 16, "prefill": [], "seed": null, "tokens": [ { "id": 506, "logprob": -1.3984375, "special": false, "text": " the" }, { "id": 1331, "logprob": -1.6953125, "special": false, "text": " people" }, { "id": 236764, "logprob": -0.23535156, "special": false, "text": "," }, { "id": 532, "logprob": -0.24316406, "special": false, "text": " and" }, { "id": 506, "logprob": -0.12109375, "special": false, "text": " the" }, { "id": 2780, "logprob": -1.1640625, "special": false, "text": " food" }, { "id": 236761, "logprob": -0.21386719, "special": false, "text": "." }, { "id": 108, "logprob": -0.64453125, "special": false, "text": "\n\n" }, { "id": 2094, "logprob": -0.77734375, "special": false, "text": "This" }, { "id": 563, "logprob": -0.040283203, "special": false, "text": " is" }, { "id": 496, "logprob": -0.03125, "special": false, "text": " a" }, { "id": 6290, "logprob": -0.03515625, "special": false, "text": " nice" }, { "id": 1977, "logprob": -0.0020751953, "special": false, "text": " place" }, { "id": 236761, "logprob": -0.0079956055, "special": false, "text": "." }, { "id": 107, "logprob": -0.9921875, "special": false, "text": "\n" }, { "id": 106, "logprob": -0.45507812, "special": true, "text": "<end_of_turn>" } ], "top_tokens": null }, "generated_text": " the people, and the food.\n\nThis is a nice place.\n" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma3/test_exceed_window.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma3/test_exceed_window.json", "repo_id": "text-generation-inference", "token_count": 1338 }
298
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -2.0507812, "special": false, "text": "\n" }, { "id": 13, "logprob": -2.3007812, "special": false, "text": "\n" }, { "id": 29902, "logprob": -2.0449219, "special": false, "text": "I" }, { "id": 505, "logprob": -1.3242188, "special": false, "text": " have" }, { "id": 263, "logprob": -0.2076416, "special": false, "text": " a" }, { "id": 1243, "logprob": -2.0273438, "special": false, "text": " test" }, { "id": 2009, "logprob": -0.6845703, "special": false, "text": " request" }, { "id": 515, "logprob": -1.1748047, "special": false, "text": " from" }, { "id": 263, "logprob": -1.0644531, "special": false, "text": " a" }, { "id": 1404, "logprob": -1.5224609, "special": false, "text": " user" } ], "top_tokens": null }, "generated_text": "\n\nI have a test request from a user" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_marlin/test_flash_llama_marlin.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_marlin/test_flash_llama_marlin.json", "repo_id": "text-generation-inference", "token_count": 864 }
299
{ "details": { "finish_reason": "length", "generated_tokens": 40, "prefill": [], "seed": null, "tokens": [ { "id": 13, "logprob": -0.27416992, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.17016602, "special": false, "text": "\n" }, { "id": 28737, "logprob": -2.7109375, "special": false, "text": "I" }, { "id": 28809, "logprob": -1.5, "special": false, "text": "’" }, { "id": 28719, "logprob": -0.34204102, "special": false, "text": "m" }, { "id": 459, "logprob": -1.6914062, "special": false, "text": " not" }, { "id": 1864, "logprob": -0.69140625, "special": false, "text": " sure" }, { "id": 513, "logprob": -1.6171875, "special": false, "text": " if" }, { "id": 315, "logprob": -1.3837891, "special": false, "text": " I" }, { "id": 541, "logprob": -1.2226562, "special": false, "text": " can" }, { "id": 1567, "logprob": -1.8652344, "special": false, "text": " come" }, { "id": 582, "logprob": -0.0070228577, "special": false, "text": " up" }, { "id": 395, "logprob": -0.0054092407, "special": false, "text": " with" }, { "id": 28705, "logprob": -0.62597656, "special": false, "text": " " }, { "id": 28770, "logprob": -0.0035572052, "special": false, "text": "3" }, { "id": 4842, "logprob": -0.93603516, "special": false, "text": " unique" }, { "id": 3085, "logprob": -0.028411865, "special": false, "text": " words" }, { "id": 369, "logprob": -1.0400391, "special": false, "text": " that" }, { "id": 6685, "logprob": -0.09710693, "special": false, "text": " describe" }, { "id": 528, "logprob": -0.066467285, "special": false, "text": " me" }, { "id": 28725, "logprob": -1.0722656, "special": false, "text": "," }, { "id": 562, "logprob": -0.33422852, "special": false, "text": " but" }, { "id": 315, "logprob": -0.5136719, "special": false, "text": " I" }, { "id": 28809, "logprob": -0.8989258, "special": false, "text": "’" }, { "id": 584, "logprob": -0.2076416, "special": false, "text": "ll" }, { "id": 1464, "logprob": -0.8808594, "special": false, "text": " try" }, { "id": 28723, "logprob": -0.88427734, "special": false, "text": "." }, { "id": 13, "logprob": -0.91064453, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.08105469, "special": false, "text": "\n" }, { "id": 28740, "logprob": -1.8486328, "special": false, "text": "1" }, { "id": 28723, "logprob": -0.111572266, "special": false, "text": "." }, { "id": 23626, "logprob": -3.15625, "special": false, "text": " Creative" }, { "id": 13, "logprob": -0.9194336, "special": false, "text": "\n" }, { "id": 28750, "logprob": -0.24841309, "special": false, "text": "2" }, { "id": 28723, "logprob": -9.393692e-05, "special": false, "text": "." }, { "id": 6785, "logprob": -3.1386719, "special": false, "text": " Fun" }, { "id": 1780, "logprob": -0.53564453, "special": false, "text": "ny" }, { "id": 13, "logprob": -0.09033203, "special": false, "text": "\n" }, { "id": 28770, "logprob": -0.00466156, "special": false, "text": "3" }, { "id": 28723, "logprob": -0.00016450882, "special": false, "text": "." } ] }, "generated_text": "\n\nI’m not sure if I can come up with 3 unique words that describe me, but I’ll try.\n\n1. Creative\n2. Funny\n3." }
text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_with_customer_support_adapter.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_lora_mistral/test_lora_mistral_with_customer_support_adapter.json", "repo_id": "text-generation-inference", "token_count": 3128 }
300
import pytest import requests from openai import OpenAI from huggingface_hub import InferenceClient @pytest.fixture(scope="module") def flash_llama_completion_handle(launcher): with launcher( "meta-llama/Meta-Llama-3.1-8B-Instruct", ) as handle: yield handle @pytest.fixture(scope="module") async def flash_llama_completion(flash_llama_completion_handle): await flash_llama_completion_handle.health(300) return flash_llama_completion_handle.client # NOTE: since `v1/completions` is a deprecated inferface/endpoint we do not provide a convience # method for it. Instead, we use the `requests` library to make the HTTP request directly. @pytest.mark.release def test_flash_llama_completion_single_prompt( flash_llama_completion, response_snapshot ): response = requests.post( f"{flash_llama_completion.base_url}/v1/completions", json={ "model": "tgi", "prompt": "What is Deep Learning?", "max_tokens": 10, "temperature": 0.0, }, headers=flash_llama_completion.headers, stream=False, ) response = response.json() assert len(response["choices"]) == 1 assert ( response["choices"][0]["text"] == " A Beginner’s Guide\nDeep learning is a subset" ) assert response == response_snapshot @pytest.mark.release async def test_flash_llama_completion_stream_usage( flash_llama_completion, response_snapshot ): client = InferenceClient(base_url=f"{flash_llama_completion.base_url}/v1") stream = client.chat_completion( model="tgi", messages=[ { "role": "user", "content": "What is Deep Learning?", } ], max_tokens=10, temperature=0.0, stream_options={"include_usage": True}, stream=True, ) string = "" chunks = [] had_usage = False for chunk in stream: # remove "data:" chunks.append(chunk) if len(chunk.choices) == 1: index = chunk.choices[0].index assert index == 0 string += chunk.choices[0].delta.content if chunk.usage: assert not had_usage had_usage = True assert had_usage assert ( string == "**Deep Learning: An Overview**\n=====================================\n\n" ) assert chunks == response_snapshot stream = client.chat_completion( model="tgi", messages=[ { "role": "user", "content": "What is Deep Learning?", } ], max_tokens=10, temperature=0.0, # No usage # stream_options={"include_usage": True}, stream=True, ) string = "" chunks = [] had_usage = False for chunk in stream: chunks.append(chunk) assert chunk.usage is None assert len(chunk.choices) == 1 assert chunk.choices[0].index == 0 string += chunk.choices[0].delta.content assert ( string == "**Deep Learning: An Overview**\n=====================================\n\n" ) @pytest.mark.release def test_flash_llama_completion_many_prompts(flash_llama_completion, response_snapshot): response = requests.post( f"{flash_llama_completion.base_url}/v1/completions", json={ "model": "tgi", "prompt": [ "What is Deep Learning?", "Is water wet?", "What is the capital of France?", "def mai", ], "max_tokens": 10, "seed": 0, "temperature": 0.0, }, headers=flash_llama_completion.headers, stream=False, ) response = response.json() assert len(response["choices"]) == 4 all_indexes = [(choice["index"], choice["text"]) for choice in response["choices"]] all_indexes.sort() all_indices, all_strings = zip(*all_indexes) assert list(all_indices) == [0, 1, 2, 3] assert list(all_strings) == [ " A Beginner’s Guide\nDeep learning is a subset", " This is a question that has puzzled many people for", " Paris\nWhat is the capital of France?\nThe", 'usculas_minusculas(s):\n """\n', ] assert response == response_snapshot @pytest.mark.release async def test_flash_llama_completion_many_prompts_stream( flash_llama_completion, response_snapshot ): client = OpenAI(api_key="xx", base_url=f"{flash_llama_completion.base_url}/v1") stream = client.completions.create( model="tgi", prompt=[ "What is Deep Learning?", "Is water wet?", "What is the capital of France?", "def mai", ], max_tokens=10, seed=0, temperature=0.0, stream=True, ) strings = [""] * 4 chunks = [] for chunk in stream: chunks.append(chunk) index = chunk.choices[0].index assert 0 <= index <= 4 strings[index] += chunk.choices[0].text assert list(strings) == [ " A Beginner’s Guide\nDeep learning is a subset", " This is a question that has puzzled many people for", " Paris\nWhat is the capital of France?\nThe", 'usculas_minusculas(s):\n """\n', ] assert chunks == response_snapshot @pytest.mark.release async def test_chat_openai_usage(flash_llama_completion, response_snapshot): client = OpenAI(api_key="xx", base_url=f"{flash_llama_completion.base_url}/v1") stream = client.chat.completions.create( model="tgi", messages=[{"role": "user", "content": "Say 'OK!'"}], stream=True, max_tokens=10, seed=42, stream_options={"include_usage": True}, ) chunks = [] for chunk in stream: chunks.append(chunk) for chunk in chunks[:-1]: assert chunk.usage is None for chunk in chunks[-1:]: assert chunk.usage is not None assert chunks == response_snapshot @pytest.mark.release async def test_chat_openai_nousage(flash_llama_completion, response_snapshot): client = OpenAI(api_key="xx", base_url=f"{flash_llama_completion.base_url}/v1") stream = client.chat.completions.create( model="tgi", messages=[{"role": "user", "content": "Say 'OK!'"}], stream=True, max_tokens=10, seed=42, stream_options={"include_usage": False}, ) chunks = [] for chunk in stream: assert chunk.usage is None chunks.append(chunk) assert chunks == response_snapshot @pytest.mark.release async def test_chat_hfhub_usage(flash_llama_completion, response_snapshot): client = InferenceClient(base_url=f"{flash_llama_completion.base_url}/v1") stream = client.chat_completion( model="tgi", messages=[{"role": "user", "content": "Say 'OK!'"}], stream=True, max_tokens=10, seed=42, stream_options={"include_usage": True}, ) chunks = [] for chunk in stream: chunks.append(chunk) for chunk in chunks[:-1]: assert chunk.usage is None for chunk in chunks[-1:]: assert chunk.usage is not None assert chunks == response_snapshot @pytest.mark.release async def test_chat_hfhub_nousage(flash_llama_completion, response_snapshot): client = InferenceClient(base_url=f"{flash_llama_completion.base_url}/v1") stream = client.chat_completion( model="tgi", messages=[{"role": "user", "content": "Say 'OK!'"}], stream=True, max_tokens=10, seed=42, stream_options={"include_usage": False}, ) chunks = [] for chunk in stream: assert chunk.usage is None chunks.append(chunk) assert chunks == response_snapshot
text-generation-inference/integration-tests/models/test_completion_prompts.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_completion_prompts.py", "repo_id": "text-generation-inference", "token_count": 3527 }
301
import pytest import json from text_generation.types import GrammarType @pytest.fixture(scope="module") def flash_llama_grammar_handle(launcher): with launcher( "TinyLlama/TinyLlama-1.1B-Chat-v1.0", num_shard=2, disable_grammar_support=False ) as handle: yield handle @pytest.fixture(scope="module") async def flash_llama_grammar(flash_llama_grammar_handle): await flash_llama_grammar_handle.health(300) return flash_llama_grammar_handle.client @pytest.mark.asyncio async def test_flash_llama_grammar(flash_llama_grammar, response_snapshot): response = await flash_llama_grammar.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_flash_llama_grammar_regex(flash_llama_grammar, response_snapshot): response = await flash_llama_grammar.generate( "Whats Googles DNS", max_new_tokens=10, decoder_input_details=True, seed=0, grammar={ "type": GrammarType.Regex, # "regex" "value": "((25[0-5]|2[0-4]\\d|[01]?\\d\\d?)\\.){3}(25[0-5]|2[0-4]\\d|[01]?\\d\\d?)", }, ) assert response.details.generated_tokens == 10 assert response.generated_text == "42.1.1.101" assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_flash_llama_grammar_json(flash_llama_grammar, response_snapshot): response = await flash_llama_grammar.generate( "info: david holtz like trees and has two cats. ", max_new_tokens=100, decoder_input_details=True, seed=0, grammar={ "type": GrammarType.Json, # "json" "value": json.dumps( { "type": "object", "$id": "https://example.com/person.schema.json", "$schema": "https://json-schema.org/draft/2020-12/schema", "title": "Person", "properties": { "firstName": { "type": "string", "description": "The person'''s first name.", }, "lastName": { "type": "string", "description": "The person'''s last name.", }, "hobby": { "description": "The person'''s hobby.", "type": "string", }, "numCats": { "description": "The number of cats the person has.", "type": "integer", "minimum": 0, }, }, "required": ["firstName", "lastName", "hobby", "numCats"], } ), }, ) assert response.details.generated_tokens == 30 assert ( response.generated_text == '{"firstName":"David","hobby":"Trees","lastName":"Holtz","numCats":2}' ) assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_flash_llama_grammar_load( flash_llama_grammar, generate_load, response_snapshot ): responses = await generate_load( flash_llama_grammar, "name: david. email: ", max_new_tokens=10, n=4, stop_sequences=[".com"], seed=0, grammar={ "type": GrammarType.Regex, # "regex" "value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+", # email regex }, ) assert len(responses) == 4 expected = "123456@gmail.com" for response in responses: assert response.generated_text == expected assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot # this is the same as the above test, but only fires off a single request # this is only to ensure that the parallel and single inference produce the same result @pytest.mark.skip @pytest.mark.asyncio async def test_flash_llama_grammar_single_load_instance( flash_llama_grammar, generate_load, response_snapshot ): response = await flash_llama_grammar.generate( "name: david. email: ", max_new_tokens=10, stop_sequences=[".com"], seed=0, grammar={ "type": GrammarType.Regex, # "regex" "value": "[\\w-]+@([\\w-]+\\.)+[\\w-]+", # email regex }, ) # assert response.details.generated_tokens == 30 assert response.generated_text == "123456@gmail.com" assert response == response_snapshot
text-generation-inference/integration-tests/models/test_flash_grammar_llama.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_grammar_llama.py", "repo_id": "text-generation-inference", "token_count": 2366 }
302
import pytest @pytest.fixture(scope="module") def flash_neox_sharded_handle(launcher): with launcher("OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def flash_neox_sharded(flash_neox_sharded_handle): await flash_neox_sharded_handle.health(300) return flash_neox_sharded_handle.client @pytest.mark.release @pytest.mark.asyncio async def test_flash_neox(flash_neox_sharded, response_snapshot): response = await flash_neox_sharded.generate( "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.release @pytest.mark.asyncio async def test_flash_neox_load(flash_neox_sharded, generate_load, response_snapshot): responses = await generate_load( flash_neox_sharded, "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_neox_sharded.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_neox_sharded.py", "repo_id": "text-generation-inference", "token_count": 507 }
303
import pytest @pytest.fixture(scope="module") def flash_idefics2_next_handle(launcher): with launcher( "HuggingFaceM4/idefics2-8b", ) as handle: yield handle @pytest.fixture(scope="module") async def flash_idefics2_next(flash_idefics2_next_handle): await flash_idefics2_next_handle.health(300) return flash_idefics2_next_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_idefics2_next_simple( flash_idefics2_next, response_snapshot, chicken ): response = await flash_idefics2_next.generate( f"User:![]({chicken})Write me a short story<end_of_utterance> \nAssistant:", max_new_tokens=10, ) assert ( response.generated_text == " A chicken is sitting on a pile of money." ), f"{repr(response.generated_text)}" assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_idefics2_two_images( flash_idefics2_next, response_snapshot, chicken, cow_beach ): response = await flash_idefics2_next.generate( f"User:![]({chicken})![]({cow_beach})Where are the cow and chicken?<end_of_utterance> \nAssistant:", max_new_tokens=20, ) assert ( response.generated_text == " The cow is standing on the beach and the chicken is sitting on a pile of money." ), f"{repr(response.generated_text)}" assert response.details.generated_tokens == 19 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_idefics2_next_all_params(flash_idefics2_next, response_snapshot): response = await flash_idefics2_next.generate( "Test request", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_idefics2_next_load( flash_idefics2_next, generate_load, response_snapshot, chicken ): responses = await generate_load( flash_idefics2_next, f"User:![]({chicken})Write me a short story<end_of_utterance> \nAssistant:", max_new_tokens=10, n=4, ) generated_texts = [r.generated_text for r in responses] assert generated_texts[0] == " A chicken is sitting on a pile of money." assert len(generated_texts) == 4 assert all([r.generated_text == generated_texts[0] for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_idefics2.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_idefics2.py", "repo_id": "text-generation-inference", "token_count": 1159 }
304
import pytest @pytest.fixture(scope="module") def flash_llama_handle(launcher): with launcher("allenai/OLMo-7B-0724-Instruct-hf", num_shard=2) as handle: yield handle @pytest.fixture(scope="module") async def flash_llama(flash_llama_handle): await flash_llama_handle.health(300) return flash_llama_handle.client @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_simple(flash_llama, response_snapshot): response = await flash_llama.generate( "Test request", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response.generated_text == ':\n\n```json\n{\n "' assert response == response_snapshot @pytest.mark.asyncio @pytest.mark.private async def test_flash_llama_load(flash_llama, generate_load, response_snapshot): responses = await generate_load(flash_llama, "Test request", max_new_tokens=10, n=4) assert len(responses) == 4 assert responses[0].generated_text == ':\n\n```json\n{\n "' assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_transformers_olmo.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_transformers_olmo.py", "repo_id": "text-generation-inference", "token_count": 431 }
305
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #define _cuda_buffers_cu #include "cuda_buffers.cuh" CudaBuffers* g_buffers[CUDA_MAX_DEVICES] = {NULL}; // __constant__ half2 q4_table[16][256]; // half2 q4_table_host[16][256]; // bool q4_table_init = false; CudaBuffers::CudaBuffers ( int _device, half* _temp_state, half* _temp_dq ) : device(_device), temp_state(_temp_state), temp_dq(_temp_dq) { cudaSetDevice(_device); cudaStreamCreate(&alt_stream_1); cudaStreamCreate(&alt_stream_2); cudaStreamCreate(&alt_stream_3); cudaEventCreate(&alt_stream_1_done); cudaEventCreate(&alt_stream_2_done); cudaEventCreate(&alt_stream_3_done); } CudaBuffers::~CudaBuffers() { cudaStreamDestroy(alt_stream_1); cudaStreamDestroy(alt_stream_2); cudaStreamDestroy(alt_stream_3); cudaEventDestroy(alt_stream_1_done); cudaEventDestroy(alt_stream_2_done); cudaEventDestroy(alt_stream_3_done); } CudaBuffers* get_buffers(const int device_index) { return g_buffers[device_index]; } void prepare_buffers_cuda ( int _device, half* _temp_state, half* _temp_dq ) { CudaBuffers* buffers = new CudaBuffers ( _device, _temp_state, _temp_dq ); g_buffers[_device] = buffers; } void cleanup_buffers_cuda() { for (int i = 0; i < CUDA_MAX_DEVICES; i++) { if (!g_buffers[i]) continue; delete g_buffers[i]; g_buffers[i] = NULL; } }
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cu/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cu", "repo_id": "text-generation-inference", "token_count": 680 }
306
#include <torch/extension.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAContext.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #include "config.h" #include "cuda/q_matrix.cuh" #include "cuda/q_gemm.cuh" #include "cpp/util.h" // Some decluttering macros #define TORCH_CHECK_DTYPE(__x, __dtype) TORCH_CHECK((__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype) #define TORCH_CHECK_DTYPE_OPT(__x, __dtype) TORCH_CHECK((__x).device().is_meta() || (__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype) #define TORCH_CHECK_SHAPES(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes") #define TORCH_CHECK_SHAPES_OPT(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).device().is_meta() || (__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes") // Quant matrix uintptr_t make_q_matrix ( torch::Tensor q_weight, torch::Tensor q_perm, torch::Tensor q_invperm, torch::Tensor q_scale, torch::Tensor q_scale_max, torch::Tensor q_groups, torch::Tensor q_group_map, torch::Tensor gptq_qzeros, torch::Tensor gptq_scales, torch::Tensor gptq_g_idx, torch::Tensor temp_dq ) { TORCH_CHECK_DTYPE(q_weight, kInt); TORCH_CHECK_DTYPE_OPT(q_perm, kShort); TORCH_CHECK_DTYPE_OPT(q_invperm, kShort); TORCH_CHECK_DTYPE_OPT(q_scale, kInt); TORCH_CHECK_DTYPE_OPT(q_scale_max, kHalf); TORCH_CHECK_DTYPE_OPT(q_groups, kShort); TORCH_CHECK_DTYPE_OPT(q_group_map, kShort); TORCH_CHECK_DTYPE_OPT(gptq_qzeros, kInt); TORCH_CHECK_DTYPE_OPT(gptq_scales, kHalf); TORCH_CHECK_DTYPE_OPT(gptq_g_idx, kInt); TORCH_CHECK_SHAPES(q_perm, 0, q_invperm, 0, 1); int device = q_weight.device().index(); int width = q_weight.size(1); int groups; int height; if (!q_scale.device().is_meta()) { TORCH_CHECK_SHAPES(q_weight, 1, q_scale, 1, 8); TORCH_CHECK_SHAPES(q_scale_max, 0, q_scale, 0, 1); groups = q_scale.size(0); height = q_invperm.size(0); } else { TORCH_CHECK_SHAPES(q_weight, 1, gptq_qzeros, 1, 8); TORCH_CHECK_SHAPES(q_weight, 1, gptq_scales, 1, 1); groups = gptq_qzeros.size(0); height = q_weight.size(0) * 8; } TORCH_CHECK(temp_dq.size(0) >= width * height, "Insufficient size of temp_dq buffer") QMatrix* m = new QMatrix ( device, height, width, groups, (uint32_t*) q_weight.data_ptr(), q_perm.device().is_meta() ? NULL : (uint16_t*) q_perm.data_ptr(), q_invperm.device().is_meta() ? NULL : (uint16_t*) q_invperm.data_ptr(), q_scale.device().is_meta() ? NULL : (uint32_t*) q_scale.data_ptr(), q_scale_max.device().is_meta() ? NULL : (half*) q_scale_max.data_ptr(), q_groups.device().is_meta() ? NULL : (uint16_t*) q_groups.data_ptr(), q_group_map.device().is_meta() ? NULL : (uint16_t*) q_group_map.data_ptr(), gptq_qzeros.device().is_meta() ? NULL : (uint32_t*) gptq_qzeros.data_ptr(), gptq_scales.device().is_meta() ? NULL : (half*) gptq_scales.data_ptr(), gptq_g_idx.device().is_meta() ? NULL : (uint32_t*) gptq_g_idx.data_ptr(), (half*) temp_dq.data_ptr() ); if (m->failed) throw std::runtime_error("CUDA out of memory"); return reinterpret_cast<uintptr_t> (m); } void gemm_half_q_half ( torch::Tensor a, uintptr_t b, torch::Tensor c, bool force_cuda ) { QMatrix* qm = reinterpret_cast<QMatrix*> (b); TORCH_CHECK_DTYPE(a, kHalf); TORCH_CHECK_DTYPE(c, kHalf); TORCH_CHECK_SHAPES(a, 0, c, 0, 1); TORCH_CHECK(qm->height == a.size(1), "a and b have incompatible shapes") TORCH_CHECK(qm->width == c.size(1), "b and c have incompatible shapes") const at::cuda::OptionalCUDAGuard device_guard(device_of(a)); gemm_half_q_half_cuda ( at::cuda::getCurrentCUDABlasHandle(), (const half*) a.data_ptr(), qm, (half*) c.data_ptr(), c.size(0), // m c.size(1), // n a.size(1), // k true, NULL, force_cuda ); } // Bindings PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("make_q_matrix", &make_q_matrix, "make_q_matrix"); m.def("gemm_half_q_half", &gemm_half_q_half, "gemm_half_q_half"); }
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/ext.cpp/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/ext.cpp", "repo_id": "text-generation-inference", "token_count": 2184 }
307
from text_generation_server.utils.hub import ( download_weights, weight_hub_files, weight_files, ) from text_generation_server.utils.convert import convert_files def test_convert_files(): model_id = "bigscience/bloom-560m" pt_filenames = weight_hub_files(model_id, extension=".bin") local_pt_files = download_weights(pt_filenames, model_id) local_st_files = [ p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors" for p in local_pt_files ] convert_files(local_pt_files, local_st_files, discard_names=[]) found_st_files = weight_files(model_id) assert all([p in found_st_files for p in local_st_files])
text-generation-inference/server/tests/utils/test_convert.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_convert.py", "repo_id": "text-generation-inference", "token_count": 259 }
308
from dataclasses import dataclass import torch from typing import Optional @dataclass class Seqlen: input_lengths: torch.Tensor cache_lengths: torch.Tensor cu_seqlen_q: Optional[torch.Tensor] cu_seqlen_k: Optional[torch.Tensor] max_q: int max_k: int def __init__( self, input_lengths, cache_lengths, cu_seqlen_q=None, max_q=None, max_k=None, ): self.input_lengths = input_lengths self.cache_lengths = cache_lengths device = self.input_lengths.device shape = self.input_lengths.shape if cu_seqlen_q is None: cu_seqlen_q = torch.arange( shape[0] + 1, device=device, dtype=torch.int32, ) max_q = 1 else: assert max_q is not None assert max_k is not None cu_seqlen_k = torch.zeros(shape[-1] + 1, device=device, dtype=torch.int32) # cuda graphs don't like this and this is necessary to clamp within mistral # Although FA2 might not want the clamping # cu_seqlen_k[0] = 0 total = self.input_lengths + self.cache_lengths torch.cumsum(total, -1, out=cu_seqlen_k[1:]) self.cu_seqlen_q = cu_seqlen_q self.cu_seqlen_k = cu_seqlen_k self.max_q = max_q self.max_k = max_k def clamp(self, max): # Flash decoding doesn't need to clamp return self
text-generation-inference/server/text_generation_server/layers/attention/common.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/attention/common.py", "repo_id": "text-generation-inference", "token_count": 739 }
309
from typing import List, Union import torch from compressed_tensors.quantization import ActivationOrdering, QuantizationArgs from loguru import logger from text_generation_server.layers.marlin.gptq import repack_gptq_for_marlin from text_generation_server.utils.log import log_once from text_generation_server.utils.weights import Weights, WeightsLoader class WNA16IntLoader(WeightsLoader): """ Loader for W4A16/W8A16 INT compressed-tensors parameters. """ def __init__(self, weights: QuantizationArgs): self.weights = weights self.desc_act = self.weights.actorder == ActivationOrdering.GROUP self.groupsize = ( -1 if self.weights.group_size is None else self.weights.group_size ) def __str__(self) -> str: quantization_type = f"W{self.weights.num_bits}A16" return f"{self.__class__.__name__} ({quantization_type})" def get_weights(self, weights: Weights, prefix: str): log_once(logger.info, "Using GPTQ-Marlin kernels") try: weight_packed = weights.get_tensor(f"{prefix}.weight_packed").t() except RuntimeError: raise RuntimeError( f"Cannot load w{self.weights.num_bits}a16 weight, make sure the model is already quantized" ) zero_point = None if not self.weights.symmetric: zero_point = weights.get_tensor(f"{prefix}.weight_zero_point").t() g_idx = None if self.desc_act: g_idx = weights.get_tensor(f"{prefix}.weight_g_idx") scales = weights.get_tensor(f"{prefix}.weight.scales").t() return repack_gptq_for_marlin( qweight=weight_packed.contiguous(), scales=scales, qzeros=zero_point, g_idx=g_idx, bits=self.weights.num_bits, desc_act=self.desc_act, groupsize=self.groupsize, quant_method="compressed-tensors", sym=self.weights.symmetric, sharded_infeatures=False, ) def get_weights_col_packed( self, weights: Weights, prefix: str, block_sizes: Union[int, List[int]], ): try: weight_packed = weights.get_packed_sharded( f"{prefix}.weight_packed", dim=0, block_sizes=block_sizes ).t() except RuntimeError: raise RuntimeError( f"Cannot load w{self.weights.num_bits}a16 weight, make sure the model is already quantized" ) scales = weights.get_packed_sharded( f"{prefix}.weight_scale", dim=0, block_sizes=block_sizes ).t() scales = scales.to(dtype=weights.dtype) zero_point = None if not self.weights.symmetric: zero_point = weights.get_packed_sharded( f"{prefix}.qzeros", dim=0, block_sizes=block_sizes ).t() g_idx = None if self.desc_act: g_idx = weights.get_tensor(f"{prefix}.g_idx") return repack_gptq_for_marlin( qweight=weight_packed.contiguous(), scales=scales, qzeros=zero_point, g_idx=g_idx, bits=self.weights.num_bits, desc_act=self.desc_act, groupsize=self.groupsize, quant_method="compressed-tensors", sym=self.weights.symmetric, sharded_infeatures=False, ) def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int): try: weight_packed = torch.cat( [ weights.get_sharded(f"{p}.weight_packed", dim=0).t() for p in prefixes ], dim=1, ) except RuntimeError: raise RuntimeError( f"Cannot load w{self.weights.num_bits}a16 weight, make sure the model is already quantized" ) scales = torch.cat( [weights.get_sharded(f"{p}.weight_scale", dim=0).t() for p in prefixes], dim=1, ) zero_point = None if not self.weights.symmetric: zero_point = torch.cat( [weights.get_sharded(f"{p}.qzeros", dim=0).t() for p in prefixes], dim=1 ).t() g_idx = None if self.desc_act: w = [weights.get_tensor(f"{p}.g_idx") for p in prefixes] for w2 in w[1:]: torch.testing.assert_close(w2, w[0]) g_idx = w[0] return repack_gptq_for_marlin( qweight=weight_packed.contiguous(), scales=scales, qzeros=zero_point, g_idx=g_idx, bits=self.weights.num_bits, desc_act=self.desc_act, groupsize=self.groupsize, quant_method="compressed-tensors", sym=self.weights.symmetric, sharded_infeatures=False, ) def get_weights_row(self, weights: Weights, prefix: str): log_once(logger.info, "Using GPTQ-Marlin kernels") try: weight_packed = weights.get_sharded(f"{prefix}.weight_packed", dim=1).t() except RuntimeError: raise RuntimeError( f"Cannot load `{self.quantize}` weight, make sure the model is already quantized." ) zero_point = None if not self.weights.symmetric: if self.desc_act or self.groupsize == -1: zero_point = weights.get_tensor(f"{prefix}.weight_zero_point").t() else: zero_point = weights.get_sharded( f"{prefix}.weight_zero_point", dim=1 ).t() g_idx = None if self.desc_act: g_idx = weights.get_sharded(f"{prefix}.g_idx", dim=0) if self.desc_act or self.groupsize == -1: scales = weights.get_tensor(f"{prefix}.weight_scale").t() else: scales = weights.get_sharded(f"{prefix}.weight_scale", dim=1).t() sharded_in_features = weights.process_group.size() > 1 return repack_gptq_for_marlin( qweight=weight_packed.contiguous(), scales=scales, qzeros=zero_point, g_idx=g_idx, bits=self.weights.num_bits, desc_act=self.desc_act, groupsize=self.groupsize, quant_method="compressed-tensors", sym=self.weights.symmetric, sharded_infeatures=sharded_in_features, )
text-generation-inference/server/text_generation_server/layers/compressed_tensors/wna16_int.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/compressed_tensors/wna16_int.py", "repo_id": "text-generation-inference", "token_count": 3314 }
310
# ruff: noqa: F821 # the above line disables the `undefined-name` rule for the model type variables from compressed_tensors.compressors.model_compressors.model_compressor import ( QuantizationConfig, ) from compressed_tensors.quantization import QuantizationType from pydantic import ValidationError import enum import os from typing import Optional, List, Dict from pathlib import Path from loguru import logger import torch import transformers from transformers.configuration_utils import PretrainedConfig from transformers.models.auto import modeling_auto from transformers.dynamic_module_utils import get_class_from_dynamic_module from huggingface_hub import hf_hub_download, HfApi from text_generation_server.utils.speculate import get_speculate, set_speculate from text_generation_server.models.model import Model from text_generation_server.models.causal_lm import CausalLM, CausalLMBatchKeysLast from text_generation_server.models.custom_modeling.opt_modeling import OPTForCausalLM from text_generation_server.models.custom_modeling.mpt_modeling import ( MPTForCausalLM, ) from text_generation_server.models.bloom import BloomCausalLMBatch from text_generation_server.models.custom_modeling.bloom_modeling import ( BloomForCausalLM, ) from text_generation_server.models.globals import ATTENTION from text_generation_server.models.seq2seq_lm import Seq2SeqLM from text_generation_server.models.galactica import GalacticaCausalLMBatch from text_generation_server.models.custom_modeling.neox_modeling import ( GPTNeoxForCausalLM, ) from text_generation_server.models.custom_modeling.phi_modeling import ( PhiConfig, PhiForCausalLM, ) from text_generation_server.models.custom_modeling.flash_phi_moe_modeling import ( PhiMoEConfig, ) from text_generation_server.models.custom_modeling.t5_modeling import ( T5ForConditionalGeneration, ) from text_generation_server.utils.adapter import ( AdapterParameters, build_layer_weight_lookup, load_and_merge_adapters, AdapterInfo, ) from text_generation_server.adapters.lora import LoraWeights from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.log import log_master # The flag below controls whether to allow TF32 on matmul. This flag defaults to False # in PyTorch 1.12 and later. torch.backends.cuda.matmul.allow_tf32 = True # The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True. torch.backends.cudnn.allow_tf32 = True # Disable gradients torch.set_grad_enabled(False) __all__ = [ "Model", "CausalLM", "Seq2SeqLM", "get_model_with_lora_adapters", ] FLASH_ATT_ERROR_MESSAGE = "{} requires Flash Attention enabled models." FLASH_ATTENTION = True try: from text_generation_server.models.flash_causal_lm import FlashCausalLM from text_generation_server.models.vlm_causal_lm import VlmCausalLM from text_generation_server.models.mllama_causal_lm import MllamaCausalLM from text_generation_server.models.custom_modeling.flash_deepseek_v2_modeling import ( FlashDeepseekV2ForCausalLM, DeepseekV2Config, ) from text_generation_server.models.custom_modeling.flash_deepseek_v3_modeling import ( FlashDeepseekV3ForCausalLM, DeepseekV3Config, ) from text_generation_server.models.custom_modeling.flash_llama_modeling import ( FlashLlamaForCausalLM, ) from text_generation_server.models.custom_modeling.flash_cohere_modeling import ( FlashCohereForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gemma_modeling import ( FlashGemmaForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gemma2_modeling import ( FlashGemma2ForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gemma3_modeling import ( FlashGemma3ForCausalLM, Gemma3ForConditionalGeneration, ) from text_generation_server.models.custom_modeling.gemma3.processing_gemma3 import ( Gemma3Processor, ) from text_generation_server.models.custom_modeling.gemma3.configuration_gemma3 import ( Gemma3Config, Gemma3TextConfig, ) from text_generation_server.models.custom_modeling.flash_dbrx_modeling import ( FlashDbrxForCausalLM, DbrxConfig, ) from text_generation_server.models.custom_modeling.flash_rw_modeling import ( RWConfig, FlashRWForCausalLM, ) from text_generation_server.models.custom_modeling.flash_neox_modeling import ( FlashGPTNeoXForCausalLM, ) from text_generation_server.models.custom_modeling.flash_pali_gemma_modeling import ( PaliGemmaForConditionalGeneration, ) from text_generation_server.models.custom_modeling.flash_phi_modeling import ( FlashPhiForCausalLM, ) from text_generation_server.models.idefics_causal_lm import IdeficsCausalLM from text_generation_server.models.mllama_causal_lm import MllamaCausalLMBatch from text_generation_server.models.custom_modeling.mllama import ( MllamaForConditionalGeneration, ) from text_generation_server.models.custom_modeling.llava_next import ( LlavaNextForConditionalGeneration, ) from text_generation_server.models.custom_modeling.flash_santacoder_modeling import ( FlashSantacoderForCausalLM, ) from text_generation_server.models.custom_modeling.flash_starcoder2_modeling import ( FlashStarcoder2ForCausalLM, ) from text_generation_server.models.custom_modeling.flash_qwen2_modeling import ( Qwen2ForCausalLM, ) from text_generation_server.models.custom_modeling.flash_mistral_modeling import ( FlashMistralForCausalLM, ) from text_generation_server.models.custom_modeling.flash_mixtral_modeling import ( FlashMixtralForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gpt2_modeling import ( FlashGPT2ForCausalLM, ) from text_generation_server.models.custom_modeling.flash_gptj_modeling import ( FlashGPTJForCausalLM, ) from text_generation_server.models.custom_modeling.idefics2 import ( Idefics2ForConditionalGeneration, ) from text_generation_server.models.custom_modeling.idefics3 import ( Idefics3ForConditionalGeneration, ) from text_generation_server.models.custom_modeling.qwen2_vl import ( Qwen2VLForConditionalGeneration, ) from text_generation_server.models.custom_modeling.qwen2_5_vl import ( Qwen2_5VLForConditionalGeneration, Qwen2_5_VLConfig, Qwen2_5_VLProcessor, ) from text_generation_server.layers.attention import SUPPORTS_WINDOWING except ImportError as e: log_master(logger.warning, f"Could not import Flash Attention enabled models: {e}") SUPPORTS_WINDOWING = False FLASH_ATTENTION = False if FLASH_ATTENTION: __all__.append(FlashCausalLM) __all__.append(IdeficsCausalLM) MAMBA_AVAILABLE = True try: from text_generation_server.models.mamba import Mamba except ImportError as e: log_master(logger.warning, f"Could not import Mamba: {e}") MAMBA_AVAILABLE = False if MAMBA_AVAILABLE: __all__.append(Mamba) FLASH_TRANSFORMERS_BACKEND = torch.cuda.is_available() or SYSTEM == "ipex" try: from text_generation_server.models.transformers_flash_causal_lm import ( TransformersFlashCausalLM, ) from text_generation_server.models.transformers_flash_vlm import ( TransformersFlashVlmCausalLM, TransformersGemma3VlmCausalLM, TransformersLlama4VlmCausalLM, ) except ImportError as e: log_master(logger.warning, f"Could not import Flash Transformers Backend: {e}") FLASH_TRANSFORMERS_BACKEND = False class ModelType(enum.Enum): DEEPSEEK_V2 = { "type": "deepseek_v2", "name": "Deepseek V2", "url": "https://huggingface.co/deepseek-ai/DeepSeek-V2", } DEEPSEEK_V3 = { "type": "deepseek_v3", "name": "Deepseek V3", "url": "https://huggingface.co/deepseek-ai/DeepSeek-V3", } IDEFICS2 = { "type": "idefics2", "name": "Idefics 2", "url": "https://huggingface.co/HuggingFaceM4/idefics2-8b", "multimodal": True, } IDEFICS3 = { "type": "idefics3", "name": "Idefics 3", "url": "https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3", "multimodal": True, } LLAVA_NEXT = { "type": "llava_next", "name": "Llava Next (1.6)", "url": "https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf", "multimodal": True, } LLAMA = { "type": "llama", "name": "Llama", "url": "https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f", } LLAMA4 = { "type": "llama4", "name": "Llama4", "url": "https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f", } PHI3 = { "type": "phi3", "name": "Phi 3", "url": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", } GRANITE = { "type": "granite", "name": "Granite", "url": "https://huggingface.co/ibm-granite/granite-3.0-8b-instruct", } GEMMA = { "type": "gemma", "name": "Gemma", "url": "https://huggingface.co/google/gemma-7b", } PALIGEMMA = { "type": "paligemma", "name": "PaliGemma", "url": "https://huggingface.co/google/paligemma-3b-pt-224", } GEMMA2 = { "type": "gemma2", "name": "Gemma2", "url": "https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315", } GEMMA3 = { "type": "gemma3", "name": "Gemma3", "url": "https://huggingface.co/collections/google/gemma-3-release-67c6c6f89c4f76621268bb6d", } GEMMA3_TEXT = { "type": "gemma3_text", "name": "Gemma3 Text", "url": "https://huggingface.co/collections/google/gemma-3-release-67c6c6f89c4f76621268bb6d", } COHERE = { "type": "cohere", "name": "Cohere", "url": "https://huggingface.co/CohereForAI/c4ai-command-r-plus", } DBRX = { "type": "dbrx", "name": "Dbrx", "url": "https://huggingface.co/databricks/dbrx-instruct", } MAMBA = { "type": "mamba", "name": "Mamba", "url": "https://huggingface.co/state-spaces/mamba-2.8b-slimpj", } MISTRAL = { "type": "mistral", "name": "Mistral", "url": "https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407", } MIXTRAL = { "type": "mixtral", "name": "Mixtral", "url": "https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1", } GPT_BIGCODE = { "type": "gpt_bigcode", "name": "Gpt Bigcode", "url": "https://huggingface.co/bigcode/gpt_bigcode-santacoder", } PHI = { "type": "phi", "name": "Phi", "url": "https://huggingface.co/microsoft/phi-1_5", } PHI_MOE = { "type": "phimoe", "name": "PhiMoe", "url": "https://huggingface.co/microsoft/Phi-3.5-MoE-instruct", } BAICHUAN = { "type": "baichuan", "name": "Baichuan", "url": "https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat", } FALCON = { "type": "falcon", "name": "Falcon", "url": "https://huggingface.co/tiiuae/falcon-7b-instruct", } STARCODER2 = { "type": "starcoder2", "name": "StarCoder 2", "url": "https://huggingface.co/bigcode/starcoder2-15b-instruct-v0.1", } QWEN2 = { "type": "qwen2", "name": "Qwen 2", "url": "https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f", } QWEN2_VL = { "type": "qwen2_vl", "name": "Qwen 2 VL", "url": "https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d", } QWEN2_5_VL = { "type": "qwen2_5_vl", "name": "Qwen 2.5 VL", "url": "https://huggingface.co/collections/Qwen/qwen25-66e81a666513e518adb90d9e", } OPT = { "type": "opt", "name": "Opt", "url": "https://huggingface.co/facebook/opt-6.7b", } T5 = { "type": "t5", "name": "T5", "url": "https://huggingface.co/google/flan-t5-xxl", } GALACTICA = { "type": "galactica", "name": "Galactica", "url": "https://huggingface.co/facebook/galactica-120b", } SANTACODER = { "type": "santacoder", "name": "SantaCoder", "url": "https://huggingface.co/bigcode/santacoder", } BLOOM = { "type": "bloom", "name": "Bloom", "url": "https://huggingface.co/bigscience/bloom-560m", } MPT = { "type": "mpt", "name": "Mpt", "url": "https://huggingface.co/mosaicml/mpt-7b-instruct", } GPT2 = { "type": "gpt2", "name": "Gpt2", "url": "https://huggingface.co/openai-community/gpt2", } GPT_NEOX = { "type": "gpt_neox", "name": "Gpt Neox", "url": "https://huggingface.co/EleutherAI/gpt-neox-20b", } GPTJ = { "type": "gptj", "name": "Gptj", "url": "https://huggingface.co/EleutherAI/gpt-j-6b", } IDEFICS = { "type": "idefics", "name": "Idefics", "url": "https://huggingface.co/HuggingFaceM4/idefics-9b", "multimodal": True, } MLLAMA = { "type": "mllama", "name": "Mllama", "url": "https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct", "multimodal": True, } __GLOBALS = locals() for data in ModelType: __GLOBALS[data.name] = data.value["type"] def get_model( model_id: str, lora_adapter_ids: Optional[List[str]], revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], kv_cache_dtype: Optional[str], trust_remote_code: bool, max_input_tokens: int, ) -> Model: global FLASH_ATTENTION config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) model_type = config_dict.get("model_type", None) quantization_config = config_dict.get("quantization_config", None) if quantization_config is None: quantization_config = config_dict.get("compression_config", None) if quantization_config is not None and quantize is None: method = quantization_config.get("quant_method", None) if method in {"gptq", "awq", "exl2"}: log_master(logger.info, f"Auto selecting quantization method {method}") quantize = method elif method == "fbgemm_fp8" or method == "fp8": log_master(logger.info, "Auto selecting quantization method fp8") quantize = "fp8" if method == "compressed-tensors": log_master( logger.info, "Auto selecting quantization method compressed-tensors" ) quantize = "compressed-tensors" else: log_master(logger.warning, f"Unknown quantization method {method}") if dtype is None: if quantize in ["awq", "exl2", "gptq", "marlin"]: if SYSTEM == "ipex" and not ( hasattr(torch, "xpu") and torch.xpu.is_available() ): dtype = torch.bfloat16 else: # These quantizers only work with float16 params. dtype = torch.float16 else: # Keep it as default for now and let # every model resolve their own default dtype. dtype = None elif dtype == "float16": dtype = torch.float16 elif dtype == "bfloat16": dtype = torch.bfloat16 else: raise RuntimeError(f"Unknown dtype {dtype}") compressed_tensors_config = None if quantize == "compressed-tensors": try: compressed_tensors_config = QuantizationConfig.model_validate( quantization_config ) except ValidationError as e: raise ValueError("Cannot parse compressed-tensors configuration") from e if kv_cache_dtype is None: kv_cache_scheme = ( compressed_tensors_config.kv_cache_scheme if isinstance(compressed_tensors_config, QuantizationConfig) else None ) if ( kv_cache_scheme is not None and kv_cache_scheme.type == QuantizationType.FLOAT and kv_cache_scheme.num_bits == 8 and SYSTEM == "cuda" and ATTENTION == "flashinfer" ): kv_cache_dtype = torch.float8_e4m3fn else: kv_cache_dtype = dtype elif kv_cache_dtype == "fp8_e4m3fn": kv_cache_dtype = torch.float8_e4m3fn elif kv_cache_dtype == "fp8_e5m2": kv_cache_dtype = torch.float8_e5m2 else: raise RuntimeError(f"Unknown kv_cache_dtype: {kv_cache_dtype}") if speculate is not None: set_speculate(speculate) else: set_speculate(0) speculator = None if "medusa_num_heads" in config_dict: medusa_model_id = model_id medusa_revision = revision model_id = config_dict["base_model_name_or_path"] revision = "main" speculate_medusa = config_dict["medusa_num_heads"] if speculate is not None: if speculate > speculate_medusa: raise RuntimeError( f"Speculate is set to `{speculate}` but this medusa models only has `{speculate_medusa}` heads, please make them match" ) else: set_speculate(speculate) else: set_speculate(speculate_medusa) config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) # Reload model type from parent. model_type = config_dict.get("model_type", None) is_local = Path(medusa_model_id).exists() if not is_local: medusa_config = hf_hub_download( medusa_model_id, revision=medusa_revision, filename="config.json" ) hf_hub_download( medusa_model_id, revision=medusa_revision, filename="medusa_lm_head.safetensors", ) speculator = { "path": Path(medusa_config).parent, "model_paths": ["medusa_lm_head.safetensors"], } else: speculator = { "path": Path(medusa_model_id), "model_paths": ["medusa_lm_head.safetensors"], } method = "medusa" elif model_type == "mlp_speculator": mlp_model_id = model_id mlp_revision = revision model_id = config_dict["base_model_name_or_path"] revision = "main" speculate_mlp = config_dict["n_predict"] if speculate is not None: if speculate > speculate_mlp: raise RuntimeError( f"Speculate is set to `{speculate}` but this mlp_speculator models only has `{speculate_mlp}` heads, please make them match" ) else: set_speculate(speculate) else: set_speculate(speculate_mlp) config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) # Reload model type from parent. model_type = config_dict.get("model_type", None) is_local = Path(mlp_model_id).exists() extension = ".safetensors" if not is_local: mlp_speculator_config = hf_hub_download( mlp_model_id, revision=mlp_revision, filename="config.json" ) api = HfApi() info = api.model_info(mlp_model_id, revision=mlp_revision) filenames = [ s.rfilename for s in info.siblings if s.rfilename.endswith(extension) and len(s.rfilename.split("/")) == 1 and "arguments" not in s.rfilename and "args" not in s.rfilename and "training" not in s.rfilename ] for filename in filenames: hf_hub_download( mlp_model_id, revision=mlp_revision, filename=filename, ) speculator_dir_path = Path(mlp_speculator_config).parent # if these are downloaded, they get converted to safetensors filenames.extend( [p for p in os.listdir(speculator_dir_path) if p.endswith(extension)] ) speculator = { "path": Path(mlp_speculator_config).parent, "model_paths": filenames, } else: speculator = Path(mlp_model_id) filenames = [p for p in os.listdir(speculator) if p.endswith(extension)] speculator = {"path": speculator, "model_paths": filenames} method = "mlp_speculator" else: method = "n-gram" speculate = get_speculate() if speculate > 0: log_master( logger.info, f"Using speculation {method} with {speculate} input ids." ) if model_type is None: # TODO: fix how we determine model type for Mamba if "ssm_cfg" in config_dict: # *only happens in Mamba case model_type = "mamba" else: raise RuntimeError( f"Could not determine model type for {model_id} revision {revision}" ) if quantize == "exl2" and sharded: raise RuntimeError( "Sharding is currently not supported with `exl2` quantization" ) sliding_window = ( config_dict.get("sliding_window") if config_dict.get("sliding_window") is not None else -1 ) use_sliding_window = sliding_window is not None and sliding_window != -1 needs_sliding_window = ( max_input_tokens is not None and max_input_tokens > sliding_window ) if use_sliding_window and needs_sliding_window and not SUPPORTS_WINDOWING: raise ValueError( f"The backend {SYSTEM} does not support sliding window attention that is used by the model type {model_type}. To use this model nonetheless with the {SYSTEM} backend, please launch TGI with the argument `--max-input-tokens` smaller than sliding_window={sliding_window} (got here max_input_tokens={max_input_tokens})." ) if model_type == DEEPSEEK_V2: if FLASH_ATTENTION: head_size = max( config_dict.get("qk_nope_dim", 128) + config_dict.get("qk_rope_dim", 64), config_dict.get("v_head_dim", 128), ) return FlashCausalLM( model_id=model_id, model_class=FlashDeepseekV2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, default_dtype=torch.bfloat16, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=DeepseekV2Config, head_size=head_size, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Deepseek V2") ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == DEEPSEEK_V3: if FLASH_ATTENTION: head_size = max( config_dict.get("qk_nope_dim", 128) + config_dict.get("qk_rope_dim", 64), config_dict.get("v_head_dim", 128), ) return FlashCausalLM( model_id=model_id, model_class=FlashDeepseekV3ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, default_dtype=torch.bfloat16, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=DeepseekV3Config, head_size=head_size, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Deepseek V3") ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == MAMBA: return Mamba( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "ssm": raise RuntimeError( "`ssm` models have been deprecated in favor of `mamba` models, which follow standard HF formats. Check out a list here: https://huggingface.co/models?search=mamba%20-hf" ) if model_id.startswith("facebook/galactica"): return CausalLM( model_id=model_id, # Yes galactica is just an OPT model. model_class=OPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=GalacticaCausalLMBatch, ) if ( model_type == GPT_BIGCODE or model_type == GPT2 and model_id.startswith("bigcode/") ): if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashSantacoderForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, aliases={"transformer.wte.weight": ["lm_head.weight"]}, num_kv_heads=1, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Santacoder") ) else: return CausalLM.fallback( model_id=model_id, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == BLOOM: return CausalLM( model_id=model_id, model_class=BloomForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=BloomCausalLMBatch, ) elif model_type == MPT: return CausalLM( model_id=model_id, model_class=MPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, batch_class=CausalLMBatchKeysLast, ) elif model_type == GPT2: if FLASH_ATTENTION: try: return FlashCausalLM( model_id=model_id, model_class=FlashGPT2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) except RuntimeError as e: # Lots of legacy models with various weight names. log_master(logger.warning, f"Couldn't load flash gpt2 variant: {e}") return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded GPT-2")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == GPTJ: if FLASH_ATTENTION: try: return FlashCausalLM( model_id=model_id, model_class=FlashGPTJForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) except RuntimeError as e: # Lots of legacy models with various weight names. log_master(logger.warning, f"Couldn't load flash gptj variant: {e}") return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded GPT-J")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == GPT_NEOX: if FLASH_ATTENTION: from text_generation_server.models.custom_modeling.flash_neox_modeling import ( GPTNeoXConfig, ) return FlashCausalLM( model_id=model_id, model_class=FlashGPTNeoXForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=GPTNeoXConfig, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: return CausalLM( model_id=model_id, model_class=GPTNeoxForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == PHI: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashPhiForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) else: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == PHI_MOE: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashLlamaForCausalLM, config_class=PhiMoEConfig, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "phi-msft": if FLASH_ATTENTION: raise NotImplementedError( "Legacy phi-msft is not supported with Flash Attention" ) else: return CausalLM( model_id=model_id, model_class=PhiForCausalLM, config_class=PhiConfig, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == LLAMA or model_type == PHI3 or model_type == GRANITE: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashLlamaForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format(f"Sharded {model_type}") ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == LLAMA4: if FLASH_TRANSFORMERS_BACKEND: from transformers import Llama4ForConditionalGeneration as Llama4Model return TransformersLlama4VlmCausalLM.fallback( model_id, Llama4Model, revision, quantize=quantize, speculator=speculator, dtype=torch.bfloat16, trust_remote_code=trust_remote_code, processor_kwargs={ "use_fast": True, }, ) elif model_type == BAICHUAN: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashLlamaForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format(f"Sharded {model_type}") ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == GEMMA: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashGemmaForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, # Works better for these models default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == GEMMA2: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashGemma2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, # Works better for these models default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma2")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == GEMMA3_TEXT: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashGemma3ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, # TODO: once implemented in transformers, use the config class # and processor class from there. config_class=Gemma3TextConfig, # Works better for these models default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma3")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == GEMMA3: if FLASH_ATTENTION: return VlmCausalLM( model_id=model_id, model_class=Gemma3ForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, # TODO: once implemented in transformers, use the config class # and processor class from there. config_class=Gemma3Config, processor_class=Gemma3Processor, default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, support_chunking=False, ) elif FLASH_TRANSFORMERS_BACKEND: from transformers import Gemma3ForConditionalGeneration as Gemma3Model return TransformersGemma3VlmCausalLM.fallback( model_id, Gemma3Model, revision, quantize=quantize, speculator=speculator, dtype=torch.bfloat16, trust_remote_code=trust_remote_code, support_chunking=False, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma3")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == COHERE: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashCohereForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Cohere")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == DBRX: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashDbrxForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, # Dbrx works better in bfloat16. default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=DbrxConfig, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded DBRX")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type in ["RefinedWeb", "RefinedWebModel", FALCON]: if sharded: if FLASH_ATTENTION: if config_dict.get("alibi", False): raise NotImplementedError("sharded is not supported for this model") return FlashCausalLM( model_id=model_id, model_class=FlashRWForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, aliases={ "lm_head.weight": ["transformer.word_embeddings.weight"], "transformer.word_embeddings.weight": ["lm_head.weight"], }, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=RWConfig, ) raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Falcon")) else: if FLASH_ATTENTION and not config_dict.get("alibi", False): return FlashCausalLM( model_id=model_id, model_class=FlashRWForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, aliases={ "lm_head.weight": ["transformer.word_embeddings.weight"], "transformer.word_embeddings.weight": ["lm_head.weight"], }, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=RWConfig, ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == MISTRAL: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashMistralForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mistral")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == MIXTRAL: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashMixtralForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mixtral")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == STARCODER2: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=FlashStarcoder2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Starcoder2") ) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == QWEN2: if FLASH_ATTENTION: return FlashCausalLM( model_id=model_id, model_class=Qwen2ForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Qwen2")) else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == OPT: return CausalLM( model_id=model_id, model_class=OPTForCausalLM, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == T5: return Seq2SeqLM( model_id=model_id, model_class=T5ForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, aliases={ "shared.weight": [ "encoder.embed_tokens.weight", "decoder.embed_tokens.weight", ] }, ) if model_type == IDEFICS: if FLASH_ATTENTION: return IdeficsCausalLM( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics")) if model_type == QWEN2_VL: if FLASH_ATTENTION: return VlmCausalLM( model_id=model_id, model_class=Qwen2VLForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, default_dtype=torch.bfloat16, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, # TODO: Fix bug in rust image_text_replacement implementation support_chunking=False, ) # TODO: Uncomment when transformers is refactored # elif FLASH_TRANSFORMERS_BACKEND: # from transformers import Qwen2VLForConditionalGeneration as Qwen2VLModel # return TransformersQwen2VlmCausalLM.fallback( # model_id, # Qwen2VLModel, # revision, # quantize=quantize, # speculator=speculator, # dtype=torch.bfloat16, # trust_remote_code=trust_remote_code, # ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Qwen2_VL")) if model_type == QWEN2_5_VL: if FLASH_ATTENTION: return VlmCausalLM( model_id=model_id, model_class=Qwen2_5VLForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, default_dtype=torch.bfloat16, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, config_class=Qwen2_5_VLConfig, processor_class=Qwen2_5_VLProcessor, # TODO: Fix bug in rust image_text_replacement implementation support_chunking=False, ) # TODO: Uncomment when transformers is refactored # elif FLASH_TRANSFORMERS_BACKEND: # return TransformersQwen2VlmCausalLM.fallback( # model_id, # Qwen2VLModel, # revision, # quantize=quantize, # speculator=speculator, # dtype=torch.bfloat16, # trust_remote_code=trust_remote_code, # config_class=Qwen2_5_VLConfig, # processor_class=Qwen2_5_VLProcessor, # ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Qwen2_5_VL")) if model_type == MLLAMA: if FLASH_ATTENTION: return MllamaCausalLM( model_id=model_id, model_class=MllamaForConditionalGeneration, batch_class=MllamaCausalLMBatch, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, support_chunking=False, ) # TODO: Uncomment when transformers is refactored and cross attn is added # elif FLASH_TRANSFORMERS_BACKEND: # from transformers import MllamaForConditionalGeneration as MllamaModel # return TransformersFlashVlmCausalLM.fallback( # model_id, # MllamaModel, # revision, # quantize=quantize, # speculator=speculator, # dtype=torch.bfloat16, # trust_remote_code=trust_remote_code, # batch_class=MllamaCausalLMBatch, # ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Mllama")) if model_type == IDEFICS2: if FLASH_ATTENTION: return VlmCausalLM( model_id=model_id, model_class=Idefics2ForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, # XXX: Extremely important to cap resolution in order to limit # VRAM usage. processor_kwargs={"size": {"longest_edge": 448, "shortest_edge": 378}}, ) elif FLASH_TRANSFORMERS_BACKEND: from transformers import Idefics2ForConditionalGeneration as Idefics2Model return TransformersFlashVlmCausalLM.fallback( model_id, Idefics2Model, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, processor_kwargs={"size": {"longest_edge": 448, "shortest_edge": 378}}, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics")) if model_type == IDEFICS3: if FLASH_ATTENTION: return VlmCausalLM( model_id=model_id, model_class=Idefics3ForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, # XXX: Extremely important to cap resolution in order to limit # VRAM usage. processor_kwargs={"size": {"longest_edge": 1456}}, ) elif FLASH_TRANSFORMERS_BACKEND: from transformers import Idefics3ForConditionalGeneration as Idefics3Model return TransformersFlashVlmCausalLM.fallback( model_id, Idefics3Model, revision, quantize=quantize, speculator=speculator, dtype=torch.bfloat16, trust_remote_code=trust_remote_code, processor_kwargs={"size": {"longest_edge": 1456}}, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics")) if model_type == PALIGEMMA: if FLASH_ATTENTION: return VlmCausalLM( model_id=model_id, model_class=PaliGemmaForConditionalGeneration, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, # Works better for these models default_dtype=torch.bfloat16, trust_remote_code=trust_remote_code, lora_adapter_ids=lora_adapter_ids, ) elif FLASH_TRANSFORMERS_BACKEND: from transformers import PaliGemmaForConditionalGeneration as PaliGemmaModel return TransformersFlashVlmCausalLM.fallback( model_id, PaliGemmaModel, revision, quantize=quantize, speculator=speculator, dtype=torch.bfloat16, trust_remote_code=trust_remote_code, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("PaliGemma")) if model_type == LLAVA_NEXT: if FLASH_ATTENTION: return VlmCausalLM( model_class=LlavaNextForConditionalGeneration, model_id=model_id, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, kv_cache_dtype=kv_cache_dtype, trust_remote_code=trust_remote_code, ) elif FLASH_TRANSFORMERS_BACKEND: from transformers import LlavaNextForConditionalGeneration as LlavaNextModel return TransformersFlashVlmCausalLM.fallback( model_id, LlavaNextModel, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("LlavaNext")) if quantize == "gptq": raise NotImplementedError( "gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`" ) if quantize == "awq": raise NotImplementedError("awq quantization is not supported for AutoModel") elif (quantize == "bitsandbytes-fp4") or (quantize == "bitsandbytes-nf4"): raise NotImplementedError("4bit quantization is not supported for AutoModel") elif quantize == "eetq": raise NotImplementedError("Eetq quantization is not supported for AutoModel") elif quantize == "exl2": raise NotImplementedError("exl2 quantization is not supported for AutoModel") auto_map = config_dict.get("auto_map", None) model_class = None # If the model is already in the library if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES: model_class = getattr( transformers, modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES[model_type] ) elif ( trust_remote_code and auto_map is not None and "AutoModelForCausalLM" in auto_map.keys() ): model_class = get_class_from_dynamic_module( config_dict["auto_map"]["AutoModelForCausalLM"], model_id ) # This means the model is ForCausalLM if model_class is not None: if FLASH_TRANSFORMERS_BACKEND and model_class.is_backend_compatible(): return TransformersFlashCausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError("sharded is not supported for AutoModel") else: return CausalLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) # Not supported at this point if sharded: raise NotImplementedError("sharded is not supported for AutoModel") # This means it is a ForSeq2SeqLM model if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES or ( trust_remote_code and auto_map is not None and "AutoModelForSeq2SeqLM" in auto_map.keys() ): return Seq2SeqLM.fallback( model_id, revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) raise ValueError(f"Unsupported model type {model_type}") # get_model_with_lora_adapters wraps the internal get_model function and adds support for loading adapters # this provides a post model loading hook to load adapters into the model after the model has been loaded def get_model_with_lora_adapters( model_id: str, lora_adapters: Optional[List[AdapterInfo]], revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], kv_cache_dtype: Optional[str], trust_remote_code: bool, max_input_tokens: int, adapter_to_index: Dict[str, int], ): lora_adapter_ids = [adapter.id for adapter in lora_adapters] model = get_model( model_id, lora_adapter_ids, revision, sharded, quantize, speculate, dtype, kv_cache_dtype, trust_remote_code, max_input_tokens, ) if len(lora_adapters) > 0: target_to_layer = build_layer_weight_lookup(model.model) for index, adapter in enumerate(lora_adapters): # The AdapterParameters object allows for merging multiple adapters into a single adapter. # At the moment, we only support loading a single adapter into the model, but we keep the # AdapterParameters object for easier extension in the future. adapter_parameters = AdapterParameters( adapter_info=[adapter], # when merging multiple adapters we can weight them differently # if this is not set, all adapters will be weighted equally # see: text_generation_server.utils.merges.strategies for impl weights=None, merge_strategy=0, density=1.0, majority_sign_method=0, ) adapter_index = index + 1 adapter_to_index[adapter.id] = adapter_index logger.info( f"Loading adapter weights into model: {','.join([adapter.id for adapter in adapter_parameters.adapter_info])}" ) weight_names = tuple([v[0] for v in target_to_layer.values()]) ( module_map, adapter_config, adapter_weight_names, adapter_tokenizer, ) = load_and_merge_adapters( model.model_id, adapter_parameters, adapter_index, weight_names, False, ) unused_weight_names = adapter_weight_names.copy() adapter_layers = [ "q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj", "qkv_proj", # add c_* layers used in starcoder2 "c_proj", "c_fc", ] for layer_name in adapter_layers: nlayers = ( 1 if layer_name == "lm_head" else len(model.model.model.layers) ) adapter_weights = LoraWeights.prepare_weights( config=adapter_config, module_map=module_map, layer_type=layer_name, unused_weight_names=unused_weight_names, nlayers=nlayers, dtype=model.dtype, world_size=model.world_size, process_group=model.process_group, target_to_layer=target_to_layer, ) if adapter_weights is None: continue model.layer_to_adapter_weights[layer_name].add_adapter( adapter_index, adapter_weights ) if len(unused_weight_names) > 0: logger.warning( f"{','.join([a.id for a in lora_adapters])} unused adapter weights: {unused_weight_names}" ) if adapter_tokenizer is not None: model.tokenizers.add_tokenizer(adapter_index, adapter_tokenizer) model.loaded_adapters.add(adapter_index) return model
text-generation-inference/server/text_generation_server/models/__init__.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/__init__.py", "repo_id": "text-generation-inference", "token_count": 37327 }
311
# coding=utf-8 # Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch T5 model.""" import copy import math import warnings from typing import Optional, Tuple, Union from loguru import logger import torch import torch.distributed from torch import nn from torch.nn import CrossEntropyLoss from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, ) from transformers.modeling_utils import PreTrainedModel from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS from transformers.utils import ( is_torch_fx_proxy, ) from transformers import T5Config from text_generation_server.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead, ) # copied from https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/models/t5/modeling_t5.py#L1316 # Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ class PartialTPEmbedding(nn.Module): def __init__(self, prefix: str, weights): super().__init__() weight = weights.get_sharded(f"{prefix}.weight", dim=1) self.weight = nn.Parameter(weight) def forward(self, input: torch.Tensor) -> torch.Tensor: return torch.nn.functional.embedding(input, self.weight) @torch.jit.script def layer_norm(hidden_states, weight, epsilon): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + epsilon) # convert into half-precision if necessary if weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(weight.dtype) return weight * hidden_states class T5LayerNorm(nn.Module): def __init__(self, prefix, weights, eps=1e-6): """ Construct a layernorm module in the T5 style. No bias and no subtraction of mean. """ super().__init__() weight = weights.get_tensor(f"{prefix}.weight") self.weight = nn.Parameter(weight) self.variance_epsilon = torch.tensor(eps) def forward(self, hidden_states): return layer_norm(hidden_states, self.weight, self.variance_epsilon) try: from apex.normalization import FusedRMSNorm T5LayerNorm = FusedRMSNorm # noqa logger.info( "Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm" ) except ImportError: # using the normal T5LayerNorm pass except Exception: logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm") pass ALL_LAYERNORM_LAYERS.append(T5LayerNorm) class T5DenseActDense(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() self.wi = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi", weights=weights, bias=False ) ### XXX: T5 models do not handle well both f16 and quantization. ### Overidding specifically this layer for that reason. ### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316 ### https://github.com/huggingface/transformers/issues/20287 _q = config.quantize _dtype = weights.dtype weights.dtype = torch.float32 config.quantize = None self.wo_cast = (torch.float32, _dtype) self.wo = TensorParallelRowLinear.load( config, prefix=f"{prefix}.wo", weights=weights, bias=False ) weights.dtype = _dtype config.quantize = _q self.dropout = nn.Dropout(config.dropout_rate) self.act = ( ACT2FN[config.dense_act_fn] if "gelu" not in config.dense_act_fn else lambda x: torch.nn.functional.gelu(x, approximate="tanh") ) def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.to(dtype=self.wo_cast[0]) hidden_states = self.wo(hidden_states) # XXX: Recasting is already done within the layer norm. # Casting back to float16 here modifies results # hidden_states = hidden_states.to(dtype=self.wo_cast[1]) return hidden_states class T5DenseGatedActDense(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() self.wi_0 = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi_0", weights=weights, bias=False ) self.wi_1 = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.wi_1", weights=weights, bias=False ) ### XXX: T5 models do not handle well both f16 and quantization. ### Overidding specifically this layer for that reason. ### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316 ### https://github.com/huggingface/transformers/issues/20287 _q = config.quantize _dtype = weights.dtype weights.dtype = torch.float32 config.quantize = None self.wo_cast = (torch.float32, _dtype) self.wo = TensorParallelRowLinear.load( config, prefix=f"{prefix}.wo", weights=weights, bias=False ) weights.dtype = _dtype config.quantize = _q self.dropout = nn.Dropout(config.dropout_rate) self.act = ( ACT2FN[config.dense_act_fn] if "gelu" not in config.dense_act_fn else lambda x: torch.nn.functional.gelu(x, approximate="tanh") ) def forward(self, hidden_states): hidden_gelu = self.act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = hidden_states.to(dtype=self.wo_cast[0]) hidden_states = self.wo(hidden_states) # XXX: Recasting is already done within the layer norm. # Casting back to float16 here modifies results # hidden_states = hidden_states.to(dtype=self.wo_cast[1]) return hidden_states class T5LayerFF(nn.Module): def __init__(self, config: T5Config, prefix, weights): super().__init__() if config.is_gated_act: self.DenseReluDense = T5DenseGatedActDense( config, prefix=f"{prefix}.DenseReluDense", weights=weights ) else: self.DenseReluDense = T5DenseActDense( config, prefix=f"{prefix}.DenseReluDense", weights=weights ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5Attention(nn.Module): def __init__( self, config: T5Config, prefix, weights, has_relative_attention_bias=False ): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.relative_attention_max_distance = config.relative_attention_max_distance self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim process_group = weights.process_group # Mesh TensorFlow initialization to avoid scaling before softmax assert self.n_heads % process_group.size() == 0 self.q = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q", weights=weights, bias=False ) self.k = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.k", weights=weights, bias=False ) self.v = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.v", weights=weights, bias=False ) self.o = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o", weights=weights, bias=False ) if self.n_heads % weights.process_group.size() != 0: raise ValueError( f"`n_heads` must be divisible by `num_shards` (got `n_heads`: {self.n_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.n_heads = self.n_heads // process_group.size() self.inner_dim = self.inner_dim // process_group.size() if self.has_relative_attention_bias: self.relative_attention_bias = PartialTPEmbedding( prefix=f"{prefix}.relative_attention_bias", weights=weights ) @staticmethod def _relative_position_bucket( relative_position, bidirectional=True, num_buckets=32, max_distance=128 ): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min( relative_position, torch.zeros_like(relative_position) ) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_position_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1), ) relative_buckets += torch.where( is_small, relative_position, relative_position_if_large ) return relative_buckets def compute_bias(self, query_length, key_length, device=None): """Compute binned relative position bias""" if device is None: device = self.relative_attention_bias.weight.device context_position = torch.arange(query_length, dtype=torch.long, device=device)[ :, None ] memory_position = torch.arange(key_length, dtype=torch.long, device=device)[ None, : ] relative_position = ( memory_position - context_position ) # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, max_distance=self.relative_attention_max_distance, ) values = self.relative_attention_bias( relative_position_bucket ) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze( 0 ) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states" real_seq_length += ( past_key_value[0].shape[2] if query_length is None else query_length ) key_length = ( real_seq_length if key_value_states is None else key_value_states.shape[1] ) def shape(states): """projection""" return states.view( batch_size, -1, self.n_heads, self.key_value_proj_dim ).transpose(1, 2) def unshape(states): """reshape""" return ( states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) ) def project(hidden_states, proj_layer, key_value_states, past_key_value): """projects hidden states correctly to key/query states""" if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) elif past_key_value.shape[2] != key_value_states.shape[1]: # checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape( self.q(hidden_states) ) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None, ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None, ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype, ) else: position_bias = self.compute_bias( real_seq_length, key_length, device=scores.device ) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -hidden_states.size(1) :, :] if mask is not None: position_bias = ( position_bias + mask ) # (batch_size, n_heads, seq_length, key_length) position_bias_masked = position_bias scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape( torch.matmul(attn_weights, value_states) ) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = ( (key_states, value_states) if (self.is_decoder and use_cache) else None ) outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, prefix, weights, has_relative_attention_bias=False): super().__init__() self.SelfAttention = T5Attention( config, prefix=f"{prefix}.SelfAttention", weights=weights, has_relative_attention_bias=has_relative_attention_bias, ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[ 1: ] # add attentions if we output them return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config, prefix, weights): super().__init__() self.EncDecAttention = T5Attention( config, prefix=f"{prefix}.EncDecAttention", weights=weights, has_relative_attention_bias=False, ) self.layer_norm = T5LayerNorm( prefix=f"{prefix}.layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[ 1: ] # add attentions if we output them return outputs class T5Block(nn.Module): def __init__(self, config, prefix, weights, has_relative_attention_bias: bool): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append( T5LayerSelfAttention( config, prefix=f"{prefix}.layer.0", weights=weights, has_relative_attention_bias=has_relative_attention_bias, ) ) if self.is_decoder: i = 2 self.layer.append( T5LayerCrossAttention( config, prefix=f"{prefix}.layer.1", weights=weights ) ) else: i = 1 self.layer.append( T5LayerFF(config, prefix=f"{prefix}.layer.{i}", weights=weights) ) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning( "`past_key_values` is passed to the encoder. Please make sure this is intended." ) expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[ 2: ] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = ( present_key_value_state + cross_attention_outputs[1] ) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class T5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = T5Config def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id." " See T5 docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full( input_ids.shape[:-1] + (1,), decoder_start_token_id ) shifted_input_ids = torch.cat( [shifted_input_ids, input_ids[..., :-1]], dim=-1 ) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert ( pad_token_id is not None ), "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, prefix, weights, embed_tokens): super().__init__(config) self.is_decoder = config.is_decoder self.embed_tokens = embed_tokens self.block = nn.ModuleList( [ T5Block( config, prefix=f"{prefix}.block.{layer_id}", weights=weights, has_relative_attention_bias=(layer_id == 0), ) for layer_id in range(config.num_layers) ] ) self.final_layer_norm = T5LayerNorm( prefix=f"{prefix}.final_layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds" ) if inputs_embeds is None: assert ( self.embed_tokens is not None ), "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = ( past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length ) if use_cache is True: assert ( self.is_decoder ), f"`use_cache` can only be set to `True` if {self} is used as a decoder" if attention_mask is None: attention_mask = torch.ones( batch_size, mask_seq_length, device=inputs_embeds.device ) if ( self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None ): encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long, ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=inputs_embeds.device ) encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask( cross_attn_head_mask, self.config.num_layers ) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate( zip(self.block, past_key_values) ): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] # Model parallel if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[ 4 if output_attentions else 3 ] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + ( present_key_value_state, ) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class T5ForConditionalGeneration(T5PreTrainedModel): def __init__(self, config: T5Config, weights): super().__init__(config) self.model_dim = config.d_model self.shared = TensorParallelEmbedding(prefix="shared", weights=weights) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack( config=encoder_config, prefix="encoder", weights=weights, embed_tokens=self.shared, ) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack( config=decoder_config, prefix="decoder", weights=weights, embed_tokens=self.shared, ) try: self.lm_head = SpeculativeHead.load( config, prefix="lm_head", weights=weights ) except RuntimeError: # Some models like t5-small were saved with shared weights unlike flan # Since they are declared as the same arch we have no choice but hope # that this is OK instead of using a proper flag. self.lm_head = SpeculativeHead.load( config, prefix="shared", weights=weights ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if ( labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None ): # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) logits, speculative_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return ( Seq2SeqLMOutput( loss=loss, logits=logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ), speculative_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, decoder_attention_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "decoder_attention_mask": decoder_attention_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning( "You might want to consider setting `use_cache=True` to speed up decoding" ) return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select( 0, beam_idx.to(layer_past_state.device) ), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + ( reordered_layer_past_states, ) return reordered_decoder_past
text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py", "repo_id": "text-generation-inference", "token_count": 22698 }
312
import asyncio import os import torch import time import signal from grpc import aio from loguru import logger from grpc_reflection.v1alpha import reflection from pathlib import Path from typing import List, Optional from text_generation_server.cache import Cache from text_generation_server.interceptor import ExceptionInterceptor from text_generation_server.models import Model, get_model_with_lora_adapters from text_generation_server.utils.adapter import AdapterInfo from text_generation_server.utils.prefill_chunking import set_max_prefill_tokens try: from text_generation_server.models.vlm_causal_lm import ( VlmCausalLMBatch, ) from text_generation_server.models.idefics_causal_lm import IdeficsCausalLMBatch from text_generation_server.models.mllama_causal_lm import MllamaCausalLMBatch VLM_BATCH_TYPES = { VlmCausalLMBatch, IdeficsCausalLMBatch, MllamaCausalLMBatch, } except (ImportError, NotImplementedError): # These imports can fail on CPU/Non flash. VLM_BATCH_TYPES = set() from text_generation_server.pb import generate_pb2_grpc, generate_pb2 from text_generation_server.tracing import UDSOpenTelemetryAioServerInterceptor from text_generation_server.models.globals import set_adapter_to_index class SignalHandler: KEEP_PROCESSING = True def __init__(self): signal.signal(signal.SIGINT, self.exit_gracefully) signal.signal(signal.SIGTERM, self.exit_gracefully) def set_keep_processing(self, value: bool): self.KEEP_PROCESSING = value def exit_gracefully(self, signum, frame): print(f"Exiting gracefully: Signal {signum}") self.set_keep_processing(False) class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer): def __init__( self, model: Model, cache: Cache, server_urls: List[str], ): self.cache = cache self.model = model # Quantize is resolved during model loading self.quantize = model.quantize self.server_urls = server_urls # For some reason, inference_mode does not work well with GLOO which we use on CPU # if model.device.type == "cuda": # # Force inference mode for the lifetime of TextGenerationService # self._inference_mode_raii_guard = torch._C._InferenceMode(True) async def Info(self, request, context): return self.model.info async def Health(self, request, context): if self.model.device.type == "cuda": torch.zeros((2, 2)).cuda() return generate_pb2.HealthResponse() async def ServiceDiscovery(self, request, context): return generate_pb2.ServiceDiscoveryResponse(urls=self.server_urls) async def ClearCache(self, request, context): if request.HasField("id"): self.cache.delete(request.id) else: self.cache.clear() return generate_pb2.ClearCacheResponse() async def FilterBatch(self, request, context): batch = self.cache.pop(request.batch_id) if batch is None: raise ValueError(f"Batch ID {request.batch_id} not found in cache.") filtered_batch = batch.filter(request.request_ids) self.cache.set(filtered_batch) return generate_pb2.FilterBatchResponse(batch=filtered_batch.to_pb()) async def Warmup(self, request, context): set_max_prefill_tokens(request.max_prefill_tokens) if self.quantize in {"exl2", "gptq"}: try: # When using GPTQ, Exllama kernels need some global kernels # For which we have the finale shapes only after the model has loaded # This will allocate those buffers. from text_generation_server.layers.gptq import ( create_exllama_buffers, set_device, ) set_device(self.model.device) create_exllama_buffers(request.max_prefill_tokens) except ImportError: pass if ( self.model.batch_type in VLM_BATCH_TYPES ): # Hack, i would rather use kwargs in the `from_pb` call batch = self.model.batch_type.from_pb_processor( request.batch, self.model.tokenizer, self.model.processor, self.model.model.config, self.model.dtype, self.model.device, ) else: batch = self.model.batch_type.from_pb( request.batch, self.model.tokenizer, self.model.dtype, self.model.device ) # Override default values with None for clearer semantics. max_input_tokens = ( request.max_input_tokens if request.HasField("max_input_tokens") else None ) max_total_tokens = ( request.max_total_tokens if request.HasField("max_total_tokens") else None ) max_supported_total_tokens, max_input_tokens, max_total_tokens = ( self.model.warmup(batch, max_input_tokens, max_total_tokens) ) return generate_pb2.WarmupResponse( max_supported_total_tokens=max_supported_total_tokens, max_input_tokens=max_input_tokens, max_total_tokens=max_total_tokens, ) async def Prefill(self, request, context): start = time.time_ns() if ( self.model.batch_type in VLM_BATCH_TYPES ): # Hack, i would rather use kwargs in the `from_pb` call batch = self.model.batch_type.from_pb_processor( request.batch, self.model.tokenizer, self.model.processor, self.model.model.config, self.model.dtype, self.model.device, ) else: batch = self.model.batch_type.from_pb( request.batch, self.model.tokenizer, self.model.dtype, self.model.device ) concat_ns = None if self.model.support_chunking: if request.HasField("cached_batch"): cached_batch = self.cache.pop(request.cached_batch.id) if cached_batch is None: raise ValueError( f"Batch ID {request.cached_batch.id} not found in cache." ) start_concat = time.time_ns() batch = self.model.batch_type.concatenate([cached_batch, batch]) concat_ns = time.time_ns() - start_concat generations, next_batch, timings = self.model.generate_token(batch) self.cache.set(next_batch) return generate_pb2.PrefillResponse( generations=[generation.to_pb() for generation in generations], batch=next_batch.to_pb() if next_batch else None, forward_ns=timings[0], decode_ns=timings[1], total_ns=time.time_ns() - start, concat_ns=concat_ns, ) async def Decode(self, request, context): start = time.time_ns() if len(request.batches) == 0: raise ValueError("Must provide at least one batch") batches = [] for batch_pb in request.batches: batch = self.cache.pop(batch_pb.id) if batch is None: raise ValueError(f"Batch ID {batch_pb.id} not found in cache.") batches.append(batch) if len(batches) == 0: raise ValueError("All batches are empty") if len(batches) > 1: start_concat = time.time_ns() batch = self.model.batch_type.concatenate(batches) concat_ns = time.time_ns() - start_concat else: batch = batches[0] concat_ns = None generations, next_batch, timings = self.model.generate_token(batch) self.cache.set(next_batch) return generate_pb2.DecodeResponse( generations=[generation.to_pb() for generation in generations], batch=next_batch.to_pb() if next_batch else None, concat_ns=concat_ns, forward_ns=timings[0], decode_ns=timings[1], total_ns=time.time_ns() - start, ) def serve( model_id: str, lora_adapters: Optional[List[AdapterInfo]], revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], kv_cache_dtype: Optional[str], trust_remote_code: bool, uds_path: Path, max_input_tokens: int, ): async def serve_inner( model_id: str, lora_adapters: Optional[List[AdapterInfo]], revision: Optional[str], sharded: bool = False, quantize: Optional[str] = None, speculate: Optional[int] = None, dtype: Optional[str] = None, kv_cache_dtype: Optional[str] = None, trust_remote_code: bool = False, ): unix_socket_template = "unix://{}-{}" adapter_to_index = {} if sharded: server_urls = [ unix_socket_template.format(uds_path, rank) for rank in range(int(os.environ["WORLD_SIZE"])) ] local_url = server_urls[int(os.environ["RANK"])] else: local_url = unix_socket_template.format(uds_path, 0) server_urls = [local_url] try: model = get_model_with_lora_adapters( model_id, lora_adapters, revision, sharded, quantize, speculate, dtype, kv_cache_dtype, trust_remote_code, max_input_tokens, adapter_to_index, ) except Exception: logger.exception("Error when initializing model") raise signal_handler = SignalHandler() set_adapter_to_index(adapter_to_index) server = aio.server( interceptors=[ ExceptionInterceptor(lambda: signal_handler.set_keep_processing(False)), UDSOpenTelemetryAioServerInterceptor(), ], options=[ # Set the maximum possible message length: i32::MAX ("grpc.max_receive_message_length", (1 << 31) - 1) ], ) generate_pb2_grpc.add_TextGenerationServiceServicer_to_server( TextGenerationService(model, Cache(), server_urls), server ) SERVICE_NAMES = ( generate_pb2.DESCRIPTOR.services_by_name["TextGenerationService"].full_name, reflection.SERVICE_NAME, ) reflection.enable_server_reflection(SERVICE_NAMES, server) server.add_insecure_port(local_url) await server.start() logger.info("Server started at {}".format(local_url)) while signal_handler.KEEP_PROCESSING: await asyncio.sleep(0.5) asyncio.run( serve_inner( model_id, lora_adapters, revision, sharded, quantize, speculate, dtype, kv_cache_dtype, trust_remote_code, ) )
text-generation-inference/server/text_generation_server/server.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/server.py", "repo_id": "text-generation-inference", "token_count": 5383 }
313
{ "name": "tokenizers-win32-arm64-msvc", "version": "0.13.4-rc1", "os": [ "win32" ], "cpu": [ "arm64" ], "main": "tokenizers.win32-arm64-msvc.node", "files": [ "tokenizers.win32-arm64-msvc.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers" }
tokenizers/bindings/node/npm/win32-arm64-msvc/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/win32-arm64-msvc/package.json", "repo_id": "tokenizers", "token_count": 277 }
314
extern crate tokenizers as tk; use crate::models::Model; use napi::bindgen_prelude::*; use std::sync::{Arc, RwLock}; use tokenizers::models::bpe::{BpeBuilder, BPE}; use tokenizers::models::wordlevel::{WordLevel, WordLevelBuilder}; use tokenizers::models::wordpiece::{WordPiece, WordPieceBuilder}; pub struct BPEFromFilesTask { pub(crate) builder: Option<BpeBuilder>, } impl Task for BPEFromFilesTask { type Output = BPE; type JsValue = Model; fn compute(&mut self) -> Result<Self::Output> { self .builder .take() .ok_or(Error::from_reason("Empty builder".to_string()))? .build() .map_err(|e| Error::from_reason(format!("{e}"))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(Model { model: Some(Arc::new(RwLock::new(output.into()))), }) } } pub struct WordPieceFromFilesTask { pub(crate) builder: Option<WordPieceBuilder>, } impl Task for WordPieceFromFilesTask { type Output = WordPiece; type JsValue = Model; fn compute(&mut self) -> Result<Self::Output> { self .builder .take() .ok_or(Error::from_reason("Empty builder".to_string()))? .build() .map_err(|e| Error::from_reason(format!("{e}"))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(Model { model: Some(Arc::new(RwLock::new(output.into()))), }) } } pub struct WordLevelFromFilesTask { pub(crate) builder: Option<WordLevelBuilder>, } impl Task for WordLevelFromFilesTask { type Output = WordLevel; type JsValue = Model; fn compute(&mut self) -> Result<Self::Output> { self .builder .take() .ok_or(Error::from_reason("Empty builder".to_string()))? .build() .map_err(|e| Error::from_reason(format!("{e}"))) } fn resolve(&mut self, _env: Env, output: Self::Output) -> Result<Self::JsValue> { Ok(Model { model: Some(Arc::new(RwLock::new(output.into()))), }) } }
tokenizers/bindings/node/src/tasks/models.rs/0
{ "file_path": "tokenizers/bindings/node/src/tasks/models.rs", "repo_id": "tokenizers", "token_count": 797 }
315
import pytest def pytest_addoption(parser): parser.addoption("--runslow", action="store_true", default=False, help="run slow tests") def pytest_configure(config): config.addinivalue_line("markers", "slow: mark test as slow to run") def pytest_collection_modifyitems(config, items): if config.getoption("--runslow"): # --runslow given in cli: do not skip slow tests return skip_slow = pytest.mark.skip(reason="need --runslow option to run") for item in items: if "slow" in item.keywords: item.add_marker(skip_slow)
tokenizers/bindings/python/conftest.py/0
{ "file_path": "tokenizers/bindings/python/conftest.py", "repo_id": "tokenizers", "token_count": 217 }
316
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from tokenizers.models import BPE from tokenizers.normalizers import NFKC from .base_tokenizer import BaseTokenizer class SentencePieceBPETokenizer(BaseTokenizer): """SentencePiece BPE Tokenizer Represents the BPE algorithm, with the pretokenization used by SentencePiece """ def __init__( self, vocab: Optional[Union[str, Dict[str, int]]] = None, merges: Optional[Union[str, List[Tuple[str, str]]]] = None, unk_token: Union[str, AddedToken] = "<unk>", replacement: str = "▁", add_prefix_space: bool = True, dropout: Optional[float] = None, fuse_unk: Optional[bool] = False, ): if vocab is not None and merges is not None: tokenizer = Tokenizer(BPE(vocab, merges, dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) else: tokenizer = Tokenizer(BPE(dropout=dropout, unk_token=unk_token, fuse_unk=fuse_unk)) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = NFKC() prepend_scheme = "always" if add_prefix_space else "never" tokenizer.pre_tokenizer = pre_tokenizers.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) tokenizer.decoder = decoders.Metaspace(replacement=replacement, prepend_scheme=prepend_scheme) parameters = { "model": "SentencePieceBPE", "unk_token": unk_token, "replacement": replacement, "add_prefix_space": add_prefix_space, "dropout": dropout, } super().__init__(tokenizer, parameters) @staticmethod def from_file(vocab_filename: str, merges_filename: str, **kwargs): vocab, merges = BPE.read_file(vocab_filename, merges_filename) return SentencePieceBPETokenizer(vocab, merges, **kwargs) def train( self, files: Union[str, List[str]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, ): """Train the model using the given files""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) if isinstance(files, str): files = [files] self._tokenizer.train(files, trainer=trainer) def train_from_iterator( self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 30000, min_frequency: int = 2, special_tokens: List[Union[str, AddedToken]] = ["<unk>"], limit_alphabet: int = 1000, initial_alphabet: List[str] = [], show_progress: bool = True, length: Optional[int] = None, ): """Train the model using the given iterator""" trainer = trainers.BpeTrainer( vocab_size=vocab_size, min_frequency=min_frequency, special_tokens=special_tokens, limit_alphabet=limit_alphabet, initial_alphabet=initial_alphabet, show_progress=show_progress, ) self._tokenizer.train_from_iterator( iterator, trainer=trainer, length=length, )
tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py", "repo_id": "tokenizers", "token_count": 1674 }
317
stable
tokenizers/bindings/python/rust-toolchain/0
{ "file_path": "tokenizers/bindings/python/rust-toolchain", "repo_id": "tokenizers", "token_count": 2 }
318
use pyo3::prelude::*; use std::collections::VecDeque; /// An simple iterator that can be instantiated with a specified length. /// We use this with iterators that don't have a size_hint but we might /// know its size. This is useful with progress bars for example. pub struct MaybeSizedIterator<I> { length: Option<usize>, iter: I, } impl<I> MaybeSizedIterator<I> where I: Iterator, { pub fn new(iter: I, length: Option<usize>) -> Self { Self { length, iter } } } impl<I> Iterator for MaybeSizedIterator<I> where I: Iterator, { type Item = I::Item; fn next(&mut self) -> Option<Self::Item> { self.iter.next() } fn size_hint(&self) -> (usize, Option<usize>) { (self.length.unwrap_or(0), None) } } /// A buffered iterator that takes care of locking the GIL only when needed. /// The `PyIterator` provided by PyO3 keeps a Python GIL token all along /// and thus doesn't allow us to release the GIL to allow having other threads. /// /// This iterator serves two purposes: /// - First, as opposed to the `pyo3::PyIterator`, it is Send and can easily be parallelized /// - Second, this let us release the GIL between two refills of the buffer, allowing other /// Python threads to work pub struct PyBufferedIterator<T, F> { iter: Option<Py<PyAny>>, converter: F, buffer: VecDeque<PyResult<T>>, size: usize, } impl<T, F, I> PyBufferedIterator<T, F> where F: Fn(Bound<'_, PyAny>) -> I, I: IntoIterator<Item = PyResult<T>>, { /// Create a new PyBufferedIterator using the provided Python object. /// This object must implement the Python Iterator Protocol, and an error will /// be return if the contract is not respected. /// /// The `converter` provides a way to convert each item in the iterator into /// something that doesn't embed a 'py token and thus allows the GIL to be released /// /// The `buffer_size` represents the number of items that we buffer before we /// need to acquire the GIL again. pub fn new(iter: &Bound<'_, PyAny>, converter: F, buffer_size: usize) -> PyResult<Self> { let py = iter.py(); let iter: Py<PyAny> = unsafe { Bound::from_borrowed_ptr_or_err(py, pyo3::ffi::PyObject_GetIter(iter.as_ptr()))?.into() }; Ok(Self { iter: Some(iter), converter, buffer: VecDeque::with_capacity(buffer_size), size: buffer_size, }) } /// Refill the buffer, and set `self.iter` as `None` if nothing more to get fn refill(&mut self) -> PyResult<()> { if self.iter.is_none() { return Ok(()); } Python::with_gil(|py| loop { if self.buffer.len() >= self.size { return Ok(()); } match unsafe { Bound::from_owned_ptr_or_opt( py, pyo3::ffi::PyIter_Next(self.iter.as_ref().unwrap().bind(py).as_ptr()), ) } { Some(obj) => self.buffer.extend((self.converter)(obj)), None => { if PyErr::occurred(py) { return Err(PyErr::fetch(py)); } else { self.iter = None; } } }; if self.iter.is_none() { return Ok(()); } }) } } impl<T, F, I> Iterator for PyBufferedIterator<T, F> where F: Fn(Bound<'_, PyAny>) -> I, I: IntoIterator<Item = PyResult<T>>, { type Item = PyResult<T>; fn next(&mut self) -> Option<Self::Item> { if !self.buffer.is_empty() { self.buffer.pop_front() } else if self.iter.is_some() { if let Err(e) = self.refill() { return Some(Err(e)); } self.next() } else { None } } }
tokenizers/bindings/python/src/utils/iterators.rs/0
{ "file_path": "tokenizers/bindings/python/src/utils/iterators.rs", "repo_id": "tokenizers", "token_count": 1807 }
319
import pickle import numpy as np import pytest from tokenizers import AddedToken, Encoding, Tokenizer from tokenizers.implementations import BertWordPieceTokenizer from tokenizers.models import BPE, Model, Unigram from tokenizers.pre_tokenizers import ByteLevel, Metaspace from tokenizers.processors import RobertaProcessing, TemplateProcessing from tokenizers.normalizers import Strip, Lowercase, Sequence from tokenizers.decoders import ByteFallback, DecodeStream, Metaspace as DecoderMetaspace from ..utils import bert_files, data_dir, multiprocessing_with_parallelism, roberta_files class TestAddedToken: def test_instantiate_with_content_only(self): added_token = AddedToken("<mask>") added_token.content = "<MASK>" assert added_token.content == "<MASK>" assert type(added_token) == AddedToken added_token.content = added_token.content.lower() assert added_token.special == False added_token.special = True assert added_token.special == True added_token.special = False assert str(added_token) == "<mask>" assert ( repr(added_token) == 'AddedToken("<mask>", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False)' ) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == True assert isinstance(pickle.loads(pickle.dumps(added_token)), AddedToken) def test_can_set_rstrip(self): added_token = AddedToken("<mask>", rstrip=True) assert added_token.rstrip == True assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == True def test_can_set_lstrip(self): added_token = AddedToken("<mask>", lstrip=True) assert added_token.rstrip == False assert added_token.lstrip == True assert added_token.single_word == False assert added_token.normalized == True def test_can_set_single_world(self): added_token = AddedToken("<mask>", single_word=True) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == True assert added_token.normalized == True def test_can_set_normalized(self): added_token = AddedToken("<mask>", normalized=False) assert added_token.rstrip == False assert added_token.lstrip == False assert added_token.single_word == False assert added_token.normalized == False class TestTokenizer: def test_has_expected_type_and_methods(self): tokenizer = Tokenizer(BPE()) assert type(tokenizer) == Tokenizer assert callable(tokenizer.num_special_tokens_to_add) assert callable(tokenizer.get_vocab) assert callable(tokenizer.get_vocab_size) assert callable(tokenizer.enable_truncation) assert callable(tokenizer.no_truncation) assert callable(tokenizer.enable_padding) assert callable(tokenizer.no_padding) assert callable(tokenizer.encode) assert callable(tokenizer.encode_batch) assert callable(tokenizer.decode) assert callable(tokenizer.decode_batch) assert callable(tokenizer.token_to_id) assert callable(tokenizer.id_to_token) assert callable(tokenizer.add_tokens) assert callable(tokenizer.add_special_tokens) assert callable(tokenizer.train) assert callable(tokenizer.post_process) assert isinstance(tokenizer.model, Model) assert tokenizer.normalizer is None assert tokenizer.pre_tokenizer is None assert tokenizer.post_processor is None assert tokenizer.decoder is None assert isinstance(pickle.loads(pickle.dumps(Tokenizer(BPE()))), Tokenizer) def test_add_tokens(self): tokenizer = Tokenizer(BPE()) added = tokenizer.add_tokens(["my", "name", "is", "john"]) assert added == 4 tokens = [AddedToken("the"), AddedToken("quick", normalized=False), AddedToken()] assert tokens[0].normalized == True added = tokenizer.add_tokens(tokens) assert added == 2 assert tokens[0].normalized == True assert tokens[1].normalized == False def test_add_special_tokens(self): tokenizer = Tokenizer(BPE()) # Can add special tokens as `str` added = tokenizer.add_special_tokens(["my", "name", "is", "john"]) assert added == 4 # Can add special tokens as `AddedToken` tokens = [AddedToken("the"), AddedToken("quick", normalized=True), AddedToken()] assert tokens[0].normalized == True added = tokenizer.add_special_tokens(tokens) assert added == 2 assert tokens[0].normalized == False assert tokens[1].normalized == True def test_encode(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can encode single sequence output = tokenizer.encode("my name is john") assert output.tokens == ["my", "name", "is", "john"] assert type(output.ids) == list assert type(output.type_ids) == list assert type(output.offsets) == list with pytest.warns(DeprecationWarning): assert type(output.words) == list assert type(output.word_ids) == list assert type(output.special_tokens_mask) == list assert type(output.attention_mask) == list assert type(output.overflowing) == list # Can encode a pair of sequences output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["my", "name", "is", "john", "pair"] assert isinstance(pickle.loads(pickle.dumps(output)), Encoding) # Can encode a single pre-tokenized sequence output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True) assert output.tokens == ["my", "name", "is", "john"] # Can encode a batch with both a single sequence and a pair of sequences output = tokenizer.encode_batch(["my name is john", ("my name is john", "pair")]) assert len(output) == 2 def test_encode_formats(self, bert_files): with pytest.deprecated_call(): tokenizer = BertWordPieceTokenizer(bert_files["vocab"]) # Encode output = tokenizer.encode("my name is john") assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"] output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"] output = tokenizer.encode(["my", "name", "is", "john"], is_pretokenized=True) assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]"] output = tokenizer.encode(["my", "name", "is", "john"], ["pair"], is_pretokenized=True) assert output.tokens == ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"] # Encode batch result_single = [ ["[CLS]", "my", "name", "is", "john", "[SEP]"], ["[CLS]", "my", "name", "is", "georges", "[SEP]"], ] result_pair = [ ["[CLS]", "my", "name", "is", "john", "[SEP]", "pair", "[SEP]"], ["[CLS]", "my", "name", "is", "georges", "[SEP]", "pair", "[SEP]"], ] def format(encodings): return [e.tokens for e in encodings] def test_single(input, is_pretokenized=False): output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized) assert format(output) == result_single def test_pair(input, is_pretokenized=False): output = tokenizer.encode_batch(input, is_pretokenized=is_pretokenized) assert format(output) == result_pair # Classic inputs # Lists test_single(["My name is John", "My name is Georges"]) test_pair([("my name is john", "pair"), ("my name is georges", "pair")]) test_pair([["my name is john", "pair"], ["my name is georges", "pair"]]) # Tuples test_single(("My name is John", "My name is Georges")) test_pair((("My name is John", "pair"), ("My name is Georges", "pair"))) # Numpy test_single(np.array(["My name is John", "My name is Georges"])) test_pair(np.array([("My name is John", "pair"), ("My name is Georges", "pair")])) test_pair(np.array([["My name is John", "pair"], ["My name is Georges", "pair"]])) # PreTokenized inputs # Lists test_single([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]], True) test_pair( [ (["My", "name", "is", "John"], ["pair"]), (["My", "name", "is", "Georges"], ["pair"]), ], True, ) test_pair( [ [["My", "name", "is", "John"], ["pair"]], [["My", "name", "is", "Georges"], ["pair"]], ], True, ) # Tuples test_single((("My", "name", "is", "John"), ("My", "name", "is", "Georges")), True) test_pair( ( (("My", "name", "is", "John"), ("pair",)), (("My", "name", "is", "Georges"), ("pair",)), ), True, ) test_pair( ( (["My", "name", "is", "John"], ["pair"]), (["My", "name", "is", "Georges"], ["pair"]), ), True, ) # Numpy test_single( np.array([["My", "name", "is", "John"], ["My", "name", "is", "Georges"]]), True, ) test_single( np.array((("My", "name", "is", "John"), ("My", "name", "is", "Georges"))), True, ) test_pair( np.array( [ [["My", "name", "is", "John"], ["pair"]], [["My", "name", "is", "Georges"], ["pair"]], ], dtype=object, ), True, ) test_pair( np.array( ( (("My", "name", "is", "John"), ("pair",)), (("My", "name", "is", "Georges"), ("pair",)), ), dtype=object, ), True, ) # Mal formed with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode([["my", "name"]]) with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode("My name is john", [["pair"]]) with pytest.raises(TypeError, match="TextInputSequence must be str"): tokenizer.encode("my name is john", ["pair"]) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode("My name is john", is_pretokenized=True) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode("My name is john", ["pair"], is_pretokenized=True) with pytest.raises(TypeError, match="InputSequence must be Union[List[str]"): tokenizer.encode(["My", "name", "is", "John"], "pair", is_pretokenized=True) def test_encode_add_special_tokens(self, roberta_files): with pytest.deprecated_call(): tokenizer = Tokenizer(BPE(roberta_files["vocab"], roberta_files["merges"])) tokenizer.add_special_tokens(["<s>", "</s>"]) tokenizer.pre_tokenizer = ByteLevel(add_prefix_space=True) tokenizer.post_processor = RobertaProcessing( ("</s>", tokenizer.token_to_id("</s>")), ("<s>", tokenizer.token_to_id("<s>")), ) # Can encode with special tokens output_with_specials = tokenizer.encode("My name is John", add_special_tokens=True) assert output_with_specials.tokens == ["<s>", "ĠMy", "Ġname", "Ġis", "ĠJohn", "</s>"] # Can encode without special tokens output_without_specials = tokenizer.encode("My name is John", add_special_tokens=False) assert output_without_specials.tokens == ["ĠMy", "Ġname", "Ġis", "ĠJohn"] def test_truncation(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.enable_truncation(2) # Can truncate single sequences output = tokenizer.encode("my name is john") assert output.tokens == ["my", "name"] # Can truncate pair sequences as well output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["my", "pair"] # Can get the params and give them to enable_truncation trunc = tokenizer.truncation tokenizer.enable_truncation(**trunc) # Left truncation direction tokenizer.enable_truncation(2, direction="left") output = tokenizer.encode("my name is john") assert output.tokens == ["is", "john"] output = tokenizer.encode("my name is john", "pair") assert output.tokens == ["john", "pair"] def test_padding(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # By default it does nothing when encoding single sequence tokenizer.enable_padding() output = tokenizer.encode("my name") assert output.tokens == ["my", "name"] # Can pad to the longest in a batch output = tokenizer.encode_batch(["my name", "my name is john"]) assert all([len(encoding) == 4 for encoding in output]) # Can pad to the specified length otherwise tokenizer.enable_padding(length=4) output = tokenizer.encode("my name") assert output.tokens == ["my", "name", "[PAD]", "[PAD]"] output = tokenizer.encode("my name", "pair") assert output.tokens == ["my", "name", "pair", "[PAD]"] # Can get the params and give them to enable_padding padding = tokenizer.padding tokenizer.enable_padding(**padding) def test_decode(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can decode single sequences output = tokenizer.decode([0, 1, 2, 3]) assert output == "my name is john" # Can decode batch output = tokenizer.decode_batch([[0, 1, 2, 3], [4]]) assert output == ["my name is john", "pair"] # Can decode stream stream = DecodeStream(skip_special_tokens=False) assert stream.step(tokenizer, 0) == "my" assert stream.step(tokenizer, 1) == " name" assert stream.step(tokenizer, 2) == " is" assert stream.step(tokenizer, 3) == " john" def test_decode_stream(self): vocab = [ ("<unk>", 0.0), ("<0x20>", -0.1), ("<0xC3>", -0.2), ("<0xA9>", -0.3), ] tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=True)) tokenizer.decoder = ByteFallback() stream = DecodeStream(skip_special_tokens=False) assert stream.step(tokenizer, 1) == " " assert stream.step(tokenizer, 2) == None assert stream.step(tokenizer, 3) == "é" vocab = [ ("<unk>", 0.0), ("▁This", -0.1), ] tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=False)) tokenizer.decoder = DecoderMetaspace() stream = DecodeStream(skip_special_tokens=False) assert stream.step(tokenizer, 1) == "This" assert stream.step(tokenizer, 1) == " This" def test_get_vocab(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can retrieve vocab with added tokens vocab = tokenizer.get_vocab(with_added_tokens=True) assert vocab == {"is": 2, "john": 3, "my": 0, "name": 1, "pair": 4} # Can retrieve vocab without added tokens vocab = tokenizer.get_vocab(with_added_tokens=False) assert vocab == {} # Can retrieve added token decoder vocab = tokenizer.get_added_tokens_decoder() assert vocab == { 0: AddedToken("my", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 1: AddedToken("name", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 2: AddedToken("is", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 3: AddedToken("john", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), 4: AddedToken("pair", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False), } def test_get_vocab_size(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) # Can retrieve vocab's size with added tokens size = tokenizer.get_vocab_size(with_added_tokens=True) assert size == 5 # Can retrieve vocab's size without added tokens size = tokenizer.get_vocab_size(with_added_tokens=False) assert size == 0 def test_post_process(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens(["my", "name", "is", "john", "pair"]) tokenizer.enable_truncation(2) tokenizer.enable_padding(length=4) encoding = tokenizer.encode("my name is john") pair_encoding = tokenizer.encode("pair") # Can post process a single encoding output = tokenizer.post_process(encoding) assert output.tokens == ["my", "name", "[PAD]", "[PAD]"] # Can post process a pair of encodings output = tokenizer.post_process(encoding, pair_encoding) assert output.tokens == ["my", "pair", "[PAD]", "[PAD]"] def test_multiprocessing_with_parallelism(self): tokenizer = Tokenizer(BPE()) multiprocessing_with_parallelism(tokenizer, False) multiprocessing_with_parallelism(tokenizer, True) def test_from_pretrained(self): tokenizer = Tokenizer.from_pretrained("bert-base-cased") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["Hey", "there", "dear", "friend", "!"] def test_from_pretrained_revision(self): tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["hey", "there", "dear", "friend", "!"] tokenizer = Tokenizer.from_pretrained("anthony/tokenizers-test", revision="gpt-2") output = tokenizer.encode("Hey there dear friend!", add_special_tokens=False) assert output.tokens == ["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"] def test_unigram_byte_fallback(self): vocab = [ ("<unk>", 0.0), ("A", -0.01), ("sen", -0.02), ("te", -0.03), ("n", -0.04), ("ce", -0.05), ("<0xF0>", -0.06), ("<0x9F>", -0.06), ("<0xA4>", -0.06), ("<0x97>", -0.06), (" ", -0.4), ] tokenizer = tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=False)) output = tokenizer.encode("A sentence 🤗") assert output.ids == [1, 10, 2, 3, 4, 5, 10, 0] assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "🤗"] tokenizer = Tokenizer(Unigram(vocab, 0, byte_fallback=True)) output = tokenizer.encode("A sentence 🤗") assert output.ids == [1, 10, 2, 3, 4, 5, 10, 6, 7, 8, 9] assert output.tokens == ["A", " ", "sen", "te", "n", "ce", " ", "<0xF0>", "<0x9F>", "<0xA4>", "<0x97>"] def test_encode_special_tokens(self): tokenizer = Tokenizer.from_pretrained("t5-base") tokenizer.add_tokens(["<eot>"]) tokenizer.add_special_tokens(["<end_of_text>"]) output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False) assert output.tokens == ["▁Hey", "▁there", "<end_of_text>", "▁dear", "<eot>", "▁friend", "!"] tokenizer.encode_special_tokens = True assert tokenizer.encode_special_tokens == True output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False) assert output.tokens == [ "▁Hey", "▁there", "<", "end", "_", "of", "_", "text", ">", "▁dear", "<eot>", "▁friend", "!", ] tokenizer.add_tokens(["of_text>"]) output = tokenizer.encode("Hey there<end_of_text> dear<eot>friend!", add_special_tokens=False) assert output.tokens == ["▁Hey", "▁there", "<", "end", "_", "of_text>", "▁dear", "<eot>", "▁friend", "!"] def test_splitting(self): tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-new-metaspace") tokenizer.pre_tokenizer.split = False tokenizer.add_tokens([AddedToken("<REPR_END>", rstrip=True, lstrip=True)]) assert tokenizer.encode("<REPR_END>inform<s>. Hey. .", add_special_tokens=False).tokens == [ "<REPR_END>", "in", "form", "<s>", ".", "▁Hey", ".", "▁▁▁▁▁▁", "▁.", ] assert tokenizer.encode("<REPR_END>inform<s>. Hey. .", add_special_tokens=False).ids == [ 32000, 262, 689, 1, 29889, 18637, 29889, 539, 869, ] assert tokenizer.encode("inform<s>. Hey. .").tokens == [ "<s>", "▁inform", "<s>", ".", "▁Hey", ".", "▁▁▁▁▁▁", "▁.", ] assert tokenizer.encode("inform<s>. Hey. .", add_special_tokens=False).tokens == [ "▁inform", "<s>", ".", "▁Hey", ".", "▁▁▁▁▁▁", "▁.", ] def test_decode_special(self): tokenizer = Tokenizer(BPE()) tokenizer.add_tokens([AddedToken("my", special=True), AddedToken("name", special=False), "is", "john", "pair"]) # Can decode single sequences output = tokenizer.decode([0, 1, 2, 3], skip_special_tokens=False) assert output == "my name is john" output = tokenizer.decode([0, 1, 2, 3], skip_special_tokens=True) assert output == "name is john" assert tokenizer.get_added_tokens_decoder()[0] == AddedToken("my", special=True) def test_setting_to_none(self): tokenizer = Tokenizer(BPE()) tokenizer.normalizer = Strip() tokenizer.normalizer = None assert tokenizer.normalizer == None tokenizer.pre_tokenizer = Metaspace() tokenizer.pre_tokenizer = None assert tokenizer.pre_tokenizer == None class TestTokenizerRepr: def test_repr(self): tokenizer = Tokenizer(BPE()) out = repr(tokenizer) assert ( out == 'Tokenizer(version="1.0", truncation=None, padding=None, added_tokens=[], normalizer=None, pre_tokenizer=None, post_processor=None, decoder=None, model=BPE(dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=False, byte_fallback=False, ignore_merges=False, vocab={}, merges=[]))' ) def test_repr_complete(self): tokenizer = Tokenizer(BPE()) tokenizer.pre_tokenizer = ByteLevel(add_prefix_space=True) tokenizer.post_processor = TemplateProcessing( single=["[CLS]", "$0", "[SEP]"], pair=["[CLS]:0", "$A", "[SEP]:0", "$B:1", "[SEP]:1"], special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) tokenizer.normalizer = Sequence([Lowercase(), Strip()]) out = repr(tokenizer) assert ( out == 'Tokenizer(version="1.0", truncation=None, padding=None, added_tokens=[], normalizer=Sequence(normalizers=[Lowercase(), Strip(strip_left=True, strip_right=True)]), pre_tokenizer=ByteLevel(add_prefix_space=True, trim_offsets=True, use_regex=True), post_processor=TemplateProcessing(single=[SpecialToken(id="[CLS]", type_id=0), Sequence(id=A, type_id=0), SpecialToken(id="[SEP]", type_id=0)], pair=[SpecialToken(id="[CLS]", type_id=0), Sequence(id=A, type_id=0), SpecialToken(id="[SEP]", type_id=0), Sequence(id=B, type_id=1), SpecialToken(id="[SEP]", type_id=1)], special_tokens={"[CLS]":SpecialToken(id="[CLS]", ids=[1], tokens=["[CLS]"]), "[SEP]":SpecialToken(id="[SEP]", ids=[0], tokens=["[SEP]"])}), decoder=None, model=BPE(dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=False, byte_fallback=False, ignore_merges=False, vocab={}, merges=[]))' )
tokenizers/bindings/python/tests/bindings/test_tokenizer.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_tokenizer.py", "repo_id": "tokenizers", "token_count": 11643 }
320
- sections: - local: index title: 🤗 Tokenizers - local: quicktour title: Quicktour - local: installation title: Installation - local: pipeline title: The tokenization pipeline - local: components title: Components - local: training_from_memory title: Training from memory title: Getting started - sections: - local: api/input-sequences title: Input Sequences - local: api/encode-inputs title: Encode Inputs - local: api/tokenizer title: Tokenizer - local: api/encoding title: Encoding - local: api/added-tokens title: Added Tokens - local: api/models title: Models - local: api/normalizers title: Normalizers - local: api/pre-tokenizers title: Pre-tokenizers - local: api/post-processors title: Post-processors - local: api/trainers title: Trainers - local: api/decoders title: Decoders - local: api/visualizer title: Visualizer title: API
tokenizers/docs/source-doc-builder/_toctree.yml/0
{ "file_path": "tokenizers/docs/source-doc-builder/_toctree.yml", "repo_id": "tokenizers", "token_count": 338 }
321
# The tokenization pipeline When calling `Tokenizer.encode` or `Tokenizer.encode_batch`, the input text(s) go through the following pipeline: - `normalization` - `pre-tokenization` - `model` - `post-processing` We'll see in details what happens during each of those steps in detail, as well as when you want to `decode <decoding>` some token ids, and how the 🤗 Tokenizers library allows you to customize each of those steps to your needs. If you're already familiar with those steps and want to learn by seeing some code, jump to `our BERT from scratch example <example>`. For the examples that require a `Tokenizer` we will use the tokenizer we trained in the `quicktour`, which you can load with: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START reload_tokenizer", "end-before": "END reload_tokenizer", "dedent": 12} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_reload_tokenizer", "end-before": "END pipeline_reload_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START reload_tokenizer", "end-before": "END reload_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ## Normalization Normalization is, in a nutshell, a set of operations you apply to a raw string to make it less random or "cleaner". Common operations include stripping whitespace, removing accented characters or lowercasing all text. If you're familiar with [Unicode normalization](https://unicode.org/reports/tr15), it is also a very common normalization operation applied in most tokenizers. Each normalization operation is represented in the 🤗 Tokenizers library by a `Normalizer`, and you can combine several of those by using a `normalizers.Sequence`. Here is a normalizer applying NFD Unicode normalization and removing accents as an example: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START setup_normalizer", "end-before": "END setup_normalizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_setup_normalizer", "end-before": "END pipeline_setup_normalizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START setup_normalizer", "end-before": "END setup_normalizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> You can manually test that normalizer by applying it to any string: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START test_normalizer", "end-before": "END test_normalizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_test_normalizer", "end-before": "END pipeline_test_normalizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START test_normalizer", "end-before": "END test_normalizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> When building a `Tokenizer`, you can customize its normalizer by just changing the corresponding attribute: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START replace_normalizer", "end-before": "END replace_normalizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_replace_normalizer", "end-before": "END pipeline_replace_normalizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START replace_normalizer", "end-before": "END replace_normalizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Of course, if you change the way a tokenizer applies normalization, you should probably retrain it from scratch afterward. ## Pre-Tokenization Pre-tokenization is the act of splitting a text into smaller objects that give an upper bound to what your tokens will be at the end of training. A good way to think of this is that the pre-tokenizer will split your text into "words" and then, your final tokens will be parts of those words. An easy way to pre-tokenize inputs is to split on spaces and punctuations, which is done by the `pre_tokenizers.Whitespace` pre-tokenizer: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START setup_pre_tokenizer", "end-before": "END setup_pre_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_setup_pre_tokenizer", "end-before": "END pipeline_setup_pre_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START setup_pre_tokenizer", "end-before": "END setup_pre_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> The output is a list of tuples, with each tuple containing one word and its span in the original sentence (which is used to determine the final `offsets` of our `Encoding`). Note that splitting on punctuation will split contractions like `"I'm"` in this example. You can combine together any `PreTokenizer` together. For instance, here is a pre-tokenizer that will split on space, punctuation and digits, separating numbers in their individual digits: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START combine_pre_tokenizer", "end-before": "END combine_pre_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_combine_pre_tokenizer", "end-before": "END pipeline_combine_pre_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START combine_pre_tokenizer", "end-before": "END combine_pre_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> As we saw in the `quicktour`, you can customize the pre-tokenizer of a `Tokenizer` by just changing the corresponding attribute: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START replace_pre_tokenizer", "end-before": "END replace_pre_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_replace_pre_tokenizer", "end-before": "END pipeline_replace_pre_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START replace_pre_tokenizer", "end-before": "END replace_pre_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Of course, if you change the way the pre-tokenizer, you should probably retrain your tokenizer from scratch afterward. ## Model Once the input texts are normalized and pre-tokenized, the `Tokenizer` applies the model on the pre-tokens. This is the part of the pipeline that needs training on your corpus (or that has been trained if you are using a pretrained tokenizer). The role of the model is to split your "words" into tokens, using the rules it has learned. It's also responsible for mapping those tokens to their corresponding IDs in the vocabulary of the model. This model is passed along when initializing the `Tokenizer` so you already know how to customize this part. Currently, the 🤗 Tokenizers library supports: - `models.BPE` - `models.Unigram` - `models.WordLevel` - `models.WordPiece` For more details about each model and its behavior, you can check [here](components#models) ## Post-Processing Post-processing is the last step of the tokenization pipeline, to perform any additional transformation to the `Encoding` before it's returned, like adding potential special tokens. As we saw in the quick tour, we can customize the post processor of a `Tokenizer` by setting the corresponding attribute. For instance, here is how we can post-process to make the inputs suitable for the BERT model: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START setup_processor", "end-before": "END setup_processor", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_setup_processor", "end-before": "END pipeline_setup_processor", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START setup_processor", "end-before": "END setup_processor", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Note that contrarily to the pre-tokenizer or the normalizer, you don't need to retrain a tokenizer after changing its post-processor. ## All together: a BERT tokenizer from scratch Let's put all those pieces together to build a BERT tokenizer. First, BERT relies on WordPiece, so we instantiate a new `Tokenizer` with this model: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START bert_setup_tokenizer", "end-before": "END bert_setup_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START bert_setup_tokenizer", "end-before": "END bert_setup_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START bert_setup_tokenizer", "end-before": "END bert_setup_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> Then we know that BERT preprocesses texts by removing accents and lowercasing. We also use a unicode normalizer: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START bert_setup_normalizer", "end-before": "END bert_setup_normalizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START bert_setup_normalizer", "end-before": "END bert_setup_normalizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START bert_setup_normalizer", "end-before": "END bert_setup_normalizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> The pre-tokenizer is just splitting on whitespace and punctuation: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START bert_setup_pre_tokenizer", "end-before": "END bert_setup_pre_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START bert_setup_pre_tokenizer", "end-before": "END bert_setup_pre_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START bert_setup_pre_tokenizer", "end-before": "END bert_setup_pre_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> And the post-processing uses the template we saw in the previous section: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START bert_setup_processor", "end-before": "END bert_setup_processor", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START bert_setup_processor", "end-before": "END bert_setup_processor", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START bert_setup_processor", "end-before": "END bert_setup_processor", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> We can use this tokenizer and train on it on wikitext like in the `quicktour`: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START bert_train_tokenizer", "end-before": "END bert_train_tokenizer", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START bert_train_tokenizer", "end-before": "END bert_train_tokenizer", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START bert_train_tokenizer", "end-before": "END bert_train_tokenizer", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> ## Decoding On top of encoding the input texts, a `Tokenizer` also has an API for decoding, that is converting IDs generated by your model back to a text. This is done by the methods `Tokenizer.decode` (for one predicted text) and `Tokenizer.decode_batch` (for a batch of predictions). The `decoder` will first convert the IDs back to tokens (using the tokenizer's vocabulary) and remove all special tokens, then join those tokens with spaces: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START test_decoding", "end-before": "END test_decoding", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START pipeline_test_decoding", "end-before": "END pipeline_test_decoding", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START test_decoding", "end-before": "END test_decoding", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> If you used a model that added special characters to represent subtokens of a given "word" (like the `"##"` in WordPiece) you will need to customize the `decoder` to treat them properly. If we take our previous `bert_tokenizer` for instance the default decoding will give: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START bert_test_decoding", "end-before": "END bert_test_decoding", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START bert_test_decoding", "end-before": "END bert_test_decoding", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START bert_test_decoding", "end-before": "END bert_test_decoding", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent> But by changing it to a proper decoder, we get: <tokenizerslangcontent> <python> <literalinclude> {"path": "../../bindings/python/tests/documentation/test_pipeline.py", "language": "python", "start-after": "START bert_proper_decoding", "end-before": "END bert_proper_decoding", "dedent": 8} </literalinclude> </python> <rust> <literalinclude> {"path": "../../tokenizers/tests/documentation.rs", "language": "rust", "start-after": "START bert_proper_decoding", "end-before": "END bert_proper_decoding", "dedent": 4} </literalinclude> </rust> <node> <literalinclude> {"path": "../../bindings/node/examples/documentation/pipeline.test.ts", "language": "js", "start-after": "START bert_proper_decoding", "end-before": "END bert_proper_decoding", "dedent": 8} </literalinclude> </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/pipeline.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/pipeline.mdx", "repo_id": "tokenizers", "token_count": 5902 }
322
Documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Rust API Reference is available directly on the `Docs.rs <https://docs.rs/tokenizers>`__ website.
tokenizers/docs/source/api/rust.inc/0
{ "file_path": "tokenizers/docs/source/api/rust.inc", "repo_id": "tokenizers", "token_count": 43 }
323
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg"> <a href="https://github.com/huggingface/tokenizers/blob/master/LICENSE"> <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue"> </a> <a href="https://docs.rs/tokenizers/"> <img alt="Doc" src="https://docs.rs/tokenizers/badge.svg"> </a> </p> <br> The core of `tokenizers`, written in Rust. Provides an implementation of today's most used tokenizers, with a focus on performance and versatility. ## What is a Tokenizer A Tokenizer works as a pipeline, it processes some raw text as input and outputs an `Encoding`. The various steps of the pipeline are: 1. The `Normalizer`: in charge of normalizing the text. Common examples of normalization are the [unicode normalization standards](https://unicode.org/reports/tr15/#Norm_Forms), such as `NFD` or `NFKC`. More details about how to use the `Normalizers` are available on the [Hugging Face blog](https://huggingface.co/docs/tokenizers/components#normalizers) 2. The `PreTokenizer`: in charge of creating initial words splits in the text. The most common way of splitting text is simply on whitespace. 3. The `Model`: in charge of doing the actual tokenization. An example of a `Model` would be `BPE` or `WordPiece`. 4. The `PostProcessor`: in charge of post-processing the `Encoding` to add anything relevant that, for example, a language model would need, such as special tokens. ### Loading a pretrained tokenizer from the Hub ```rust use tokenizers::tokenizer::{Result, Tokenizer}; fn main() -> Result<()> { # #[cfg(feature = "http")] # { // needs http feature enabled let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?; let encoding = tokenizer.encode("Hey there!", false)?; println!("{:?}", encoding.get_tokens()); # } Ok(()) } ``` ### Deserialization and tokenization example ```rust use tokenizers::tokenizer::{Result, Tokenizer, EncodeInput}; use tokenizers::models::bpe::BPE; fn main() -> Result<()> { let bpe_builder = BPE::from_file("./path/to/vocab.json", "./path/to/merges.txt"); let bpe = bpe_builder .dropout(0.1) .unk_token("[UNK]".into()) .build()?; let mut tokenizer = Tokenizer::new(bpe); let encoding = tokenizer.encode("Hey there!", false)?; println!("{:?}", encoding.get_tokens()); Ok(()) } ``` ### Training and serialization example ```rust use tokenizers::decoders::DecoderWrapper; use tokenizers::models::bpe::{BpeTrainerBuilder, BPE}; use tokenizers::normalizers::{strip::Strip, unicode::NFC, utils::Sequence, NormalizerWrapper}; use tokenizers::pre_tokenizers::byte_level::ByteLevel; use tokenizers::pre_tokenizers::PreTokenizerWrapper; use tokenizers::processors::PostProcessorWrapper; use tokenizers::{AddedToken, Model, Result, TokenizerBuilder}; use std::path::Path; fn main() -> Result<()> { let vocab_size: usize = 100; let mut trainer = BpeTrainerBuilder::new() .show_progress(true) .vocab_size(vocab_size) .min_frequency(0) .special_tokens(vec![ AddedToken::from(String::from("<s>"), true), AddedToken::from(String::from("<pad>"), true), AddedToken::from(String::from("</s>"), true), AddedToken::from(String::from("<unk>"), true), AddedToken::from(String::from("<mask>"), true), ]) .build(); let mut tokenizer = TokenizerBuilder::new() .with_model(BPE::default()) .with_normalizer(Some(Sequence::new(vec![ Strip::new(true, true).into(), NFC.into(), ]))) .with_pre_tokenizer(Some(ByteLevel::default())) .with_post_processor(Some(ByteLevel::default())) .with_decoder(Some(ByteLevel::default())) .build()?; let pretty = false; tokenizer .train_from_files( &mut trainer, vec!["path/to/vocab.txt".to_string()], )? .save("tokenizer.json", pretty)?; Ok(()) } ``` ## Additional information - tokenizers is designed to leverage CPU parallelism when possible. The level of parallelism is determined by the total number of core/threads your CPU provides but this can be tuned by setting the `RAYON_RS_NUM_THREADS` environment variable. As an example setting `RAYON_RS_NUM_THREADS=4` will allocate a maximum of 4 threads. **_Please note this behavior may evolve in the future_** ## Features - **progressbar**: The progress bar visualization is enabled by default. It might be disabled if compilation for certain targets is not supported by the [termios](https://crates.io/crates/termios) dependency of the [indicatif](https://crates.io/crates/indicatif) progress bar. - **http**: This feature enables downloading the tokenizer via HTTP. It is disabled by default. With this feature enabled, `Tokenizer::from_pretrained` becomes accessible.
tokenizers/tokenizers/README.md/0
{ "file_path": "tokenizers/tokenizers/README.md", "repo_id": "tokenizers", "token_count": 1890 }
324
#!/usr/bin/env node const { spawn } = require("child_process"); const fs = require("fs"); let folderName = '.'; if (process.argv.length >= 3) { folderName = process.argv[2]; if (!fs.existsSync(folderName)) { fs.mkdirSync(folderName); } } const clone = spawn("git", ["clone", "https://github.com/rustwasm/create-wasm-app.git", folderName]); clone.on("close", code => { if (code !== 0) { console.error("cloning the template failed!") process.exit(code); } else { console.log("🦀 Rust + 🕸 Wasm = ❤"); } });
tokenizers/tokenizers/examples/unstable_wasm/www/.bin/create-wasm-app.js/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/.bin/create-wasm-app.js", "repo_id": "tokenizers", "token_count": 210 }
325
use crate::tokenizer::{Decoder, Result}; use monostate::MustBe; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize, Default)] /// Fuse simply fuses all tokens into one big string. /// It's usually the last decoding step anyway, but this /// decoder exists incase some decoders need to happen after that /// step #[non_exhaustive] pub struct Fuse { #[serde(rename = "type")] type_: MustBe!("Fuse"), } impl Fuse { pub fn new() -> Self { Self { type_: MustBe!("Fuse"), } } } impl Decoder for Fuse { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { let new_string = tokens.join(""); Ok(vec![new_string]) } } #[cfg(test)] mod tests { use super::*; #[test] fn decode() { let decoder = Fuse::new(); let res = decoder .decode_chain(vec!["Hey".into(), " friend!".into()]) .unwrap(); assert_eq!(res, vec!["Hey friend!"]); } }
tokenizers/tokenizers/src/decoders/fuse.rs/0
{ "file_path": "tokenizers/tokenizers/src/decoders/fuse.rs", "repo_id": "tokenizers", "token_count": 433 }
326
use crate::models::unigram::{lattice::Lattice, model::Unigram}; use crate::tokenizer::{AddedToken, Result, Trainer}; use crate::utils::parallelism::*; use crate::utils::progress::{ProgressBar, ProgressStyle}; use ahash::{AHashMap, AHashSet}; use log::debug; use serde::{Deserialize, Serialize}; use std::cmp::Reverse; use std::convert::TryInto; // A token and a score type SentencePiece = (String, f64); // A full sentence or word + it's count within the dataset type Sentence = (String, u32); fn digamma(mut x: f64) -> f64 { let mut result = 0.0; while x < 7.0 { result -= 1.0 / x; x += 1.0; } x -= 1.0 / 2.0; let xx = 1.0 / x; let xx2 = xx * xx; let xx4 = xx2 * xx2; result += x.ln() + (1.0 / 24.0) * xx2 - 7.0 / 960.0 * xx4 + (31.0 / 8064.0) * xx4 * xx2 - (127.0 / 30720.0) * xx4 * xx4; result } #[derive(thiserror::Error, Debug)] pub enum UnigramTrainerError { #[error("The vocabulary is not large enough to contain all chars")] VocabularyTooSmall, } fn to_log_prob(pieces: &mut [SentencePiece]) { let sum: f64 = pieces.iter().map(|(_, score)| score).sum(); let logsum = sum.ln(); for (_, score) in pieces.iter_mut() { *score = score.ln() - logsum; } } /// A `UnigramTrainer` can train a `Unigram` model from `word_counts`. #[non_exhaustive] #[derive(Builder, Debug, Clone, Serialize, Deserialize)] pub struct UnigramTrainer { #[builder(default = "true")] pub show_progress: bool, #[builder(default = "8000")] pub vocab_size: u32, #[builder(default = "2")] pub n_sub_iterations: u32, #[builder(default = "0.75")] pub shrinking_factor: f64, #[builder(default = "vec![]")] pub special_tokens: Vec<AddedToken>, #[builder(default = "AHashSet::new()")] pub initial_alphabet: AHashSet<char>, #[builder(default = "None")] pub unk_token: Option<String>, #[builder(default = "16")] pub max_piece_length: usize, #[builder(default = "1_000_000")] seed_size: usize, #[builder(default = "AHashMap::new()")] words: AHashMap<String, u32>, } impl Default for UnigramTrainer { fn default() -> Self { Self::builder().build().unwrap() } } impl UnigramTrainer { pub fn builder() -> UnigramTrainerBuilder { UnigramTrainerBuilder::default() } /// Setup a progress bar if asked to show progress fn setup_progress(&self) -> Option<ProgressBar> { if self.show_progress { let p = ProgressBar::new(0); p.set_style( ProgressStyle::default_bar() .template("[{elapsed_precise}] {msg:<30!} {wide_bar} {pos:<9!}/{len:>9!}") .expect("Invalid progress template"), ); Some(p) } else { None } } fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool { // Checks string length // Space not in the substring, numbers, hiragana and more should be taken // care of within pre_tokenizers. // https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203 let n = char_string.len(); if char_string.is_empty() || n > self.max_piece_length { return false; } true } fn finalize(&self, model: Unigram, required_chars: AHashSet<String>) -> Result<Unigram> { let mut min_score_penalty = 0.0; let min_score_penalty_delta = 0.0001; let mut pieces: Vec<(String, f64)> = vec![]; let mut inserted: AHashSet<String> = AHashSet::new(); // We don't want to include the <UNK> that was used to train inserted.insert("<UNK>".into()); let existing_pieces: AHashMap<String, f64> = model.iter().cloned().collect(); for c in required_chars { if let Some(t) = existing_pieces.get(&c) { inserted.insert(c.clone()); pieces.push((c, *t)); } else { let score = model.min_score + min_score_penalty; inserted.insert(c.clone()); pieces.push((c, score)); min_score_penalty += min_score_penalty_delta; } } let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token { let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| { if t.content == *unk { Some(i) } else { None } }); match unk_id { Some(id) => (Some(id), false), None => (Some(0), true), } } else { (None, false) }; let vocab_size_without_special_tokens = if need_add_unk { self.vocab_size as usize - self.special_tokens.len() - 1 } else { self.vocab_size as usize - self.special_tokens.len() }; for (token, score) in model.iter() { if inserted.contains::<str>(token) { continue; } inserted.insert(token.to_string()); pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score })); if pieces.len() == vocab_size_without_special_tokens { break; } } pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); // Insert the necessary tokens let mut special_tokens = self .special_tokens .iter() .map(|t| (t.content.clone(), 0.0)) .collect::<Vec<_>>(); if need_add_unk { special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0)); } Unigram::from( special_tokens.into_iter().chain(pieces).collect(), unk_id, model.byte_fallback(), ) } fn required_chars(&self, word_counts: &[Sentence]) -> AHashSet<String> { word_counts .iter() .flat_map(|(s, _count)| s.chars()) .chain(self.initial_alphabet.iter().copied()) .map(|c| c.to_string()) .collect() } fn make_seed_sentence_pieces( &self, sentences: &[Sentence], _progress: &Option<ProgressBar>, ) -> Vec<SentencePiece> { // Put all sentences in a string, separated by \0 let total: usize = sentences .iter() .map(|(s, _)| s.chars().count()) .sum::<usize>() + sentences.len(); let mut flat_string = String::with_capacity(total); let mut all_chars: AHashMap<char, u32> = AHashMap::new(); let c_sentence_boundary = '\0'; let k_sentence_boundary = '\0'.to_string(); for (string, n) in sentences { if string.is_empty() { continue; } flat_string.push_str(string); // XXX // Comment suggests we add sentence boundary, but it seems to be missing from actual // code in spm. flat_string.push_str(&k_sentence_boundary); for c in string.chars() { if c != c_sentence_boundary { *all_chars.entry(c).or_insert(0) += n; } } } flat_string.shrink_to_fit(); #[cfg(feature = "esaxx_fast")] let suffix = esaxx_rs::suffix(&flat_string).unwrap(); #[cfg(not(feature = "esaxx_fast"))] let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap(); // Basic chars need to be in sentence pieces. let mut seed_sentencepieces: Vec<SentencePiece> = vec![]; let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect(); // Reversed order sall_chars.sort_by_key(|&a| Reverse(a)); let mut substr_index: Vec<_> = suffix .iter() .filter_map(|(string, freq)| { if string.len() <= 1 { return None; } if string.contains(&c_sentence_boundary) { return None; } if !self.is_valid_sentencepiece(string) { return None; } let score = freq * string.len() as u32; // if let Some(p) = &progress { // p.inc(1); // } Some((score, string)) }) .collect(); // Fill seed_sentencepieces for (count, character) in sall_chars { seed_sentencepieces.push((character.to_string(), count.into())); } // sort by decreasing score substr_index.sort_by_key(|&a| Reverse(a)); for (score, char_string) in substr_index { // Just in case assert!(self.is_valid_sentencepiece(char_string)); let string: String = char_string.iter().collect(); seed_sentencepieces.push((string, score.into())); if seed_sentencepieces.len() >= self.seed_size { break; } } to_log_prob(&mut seed_sentencepieces); seed_sentencepieces } fn prune_sentence_pieces( &self, model: &Unigram, pieces: &[SentencePiece], sentences: &[Sentence], ) -> Vec<SentencePiece> { let mut always_keep = vec![true; pieces.len()]; let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; let bos_id = pieces.len() + 1; let eos_id = pieces.len() + 2; // First, segments the current sentencepieces to know // how each sentencepiece is resegmented if this sentencepiece is removed // from the vocabulary. // To do so, we take the second best segmentation of sentencepiece[i]. // alternatives[i] stores the sequence of second best sentencepieces. for (id, (token, _score)) in pieces.iter().enumerate() { // Always keep unk. if id == 0 { always_keep[id] = false; continue; } let mut lattice = Lattice::from(token, bos_id, eos_id); model.populate_nodes(&mut lattice); let nbests = lattice.nbest(2); if nbests.len() == 1 { always_keep[id] = true; } else if nbests[0].len() >= 2 { always_keep[id] = false; } else if nbests[0].len() == 1 { always_keep[id] = true; for node in &nbests[1] { let alt_id = node.borrow().id; alternatives[id].push(alt_id); } } } // Second, segments all sentences to compute likelihood // with a unigram language model. inverted[i] stores // the set of sentence index where the sentencepieces[i] appears. let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect(); let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences .maybe_par_chunks(chunk_size) .map(|enumerated_sentence_count_chunk| { let mut vsum = 0.0; let mut freq: Vec<f64> = vec![0.0; pieces.len()]; let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; for (i, (sentence, count)) in enumerated_sentence_count_chunk { let mut lattice = Lattice::from(sentence, bos_id, eos_id); model.populate_nodes(&mut lattice); vsum += *count as f64; for node_ref in lattice.viterbi() { let id = node_ref.borrow().id; freq[id] += *count as f64; inverted[id].push(*i); } } (vsum, freq, inverted) }) .reduce( || (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]), |(vsum, freq, inverted), (lvsum, lfreq, linverted)| { ( vsum + lvsum, freq.iter() .zip(lfreq) .map(|(global_el, local_el)| global_el + local_el) .collect(), inverted .iter() .zip(linverted) .map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat()) .collect(), ) }, ); let (vsum, freq, inverted) = collected; let sum: f64 = freq.iter().sum(); let logsum = sum.ln(); let mut candidates: Vec<(usize, f64)> = vec![]; let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize); new_pieces.push(pieces[0].clone()); // Finally, computes how likely the LM likelihood is reduced if // the sentencepiece[i] is removed from the vocabulary. // Since the exact computation of loss is difficult, we compute the // loss approximately by assuming that all sentencepiece[i] in the sentences // are replaced with alternatives[i] when sentencepiece[i] is removed. for (id, (token, score)) in pieces.iter().enumerate() { if id == 0 { continue; } if freq[id] == 0.0 && !always_keep[id] { // not found in Viterbi path. Can remove this entry safely. continue; } else if alternatives[id].is_empty() { // no alternatives. Keeps this entry. new_pieces.push((token.to_string(), *score)); } else { let mut f = 0.0; // the frequency of pieces[i]; for n in &inverted[id] { let score = sentences[*n].1 as f64; f += score; } // TODO: Temporary hack to avoid Nans. if f == 0.0 || f.is_nan() { // new_pieces.push((token.to_string(), *score)); continue; } f /= vsum; // normalizes by all sentence frequency. let logprob_sp = freq[id].ln() - logsum; // After removing the sentencepiece[i], its frequency freq[i] is // re-assigned to alternatives. // new_sum = current_sum - freq[i] + freq[i] * alternatives.size() // = current_sum + freq[i] (alternatives - 1) let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln(); // The frequencies of alternatives are increased by freq[i]. let mut logprob_alt = 0.0; for n in &alternatives[id] { logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt; } // loss: the diff of likelihood after removing the sentencepieces[i]. let loss = f * (logprob_sp - logprob_alt); if loss.is_nan() { panic!(""); } candidates.push((id, loss)); } } let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize; let pruned_size = desired_vocab_size.max(pruned_size); candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); for (id, _score) in candidates { if new_pieces.len() == pruned_size { break; } new_pieces.push(pieces[id].clone()); } new_pieces } /// Update the progress bar with the new provided length and message fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &'static str) { if let Some(p) = p { p.set_message(message); p.set_length(len as u64); p.reset(); } } /// Set the progress bar in the finish state fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) { if let Some(p) = p { p.set_length(final_len as u64); p.finish(); println!(); } } fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) { let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum(); let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let collected: (f64, u32, Vec<f64>) = sentences .maybe_par_chunks(chunk_size) .map(|sentences_chunk| { let mut expected: Vec<f64> = vec![0.0; model.len()]; let mut objs: f64 = 0.0; let mut ntokens: u32 = 0; for (string, freq) in sentences_chunk { let mut lattice = Lattice::from(string, model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected); if z.is_nan() { panic!("likelihood is NAN. Input sentence may be too long."); } ntokens += lattice.viterbi().len() as u32; objs -= z / (all_sentence_freq as f64); } (objs, ntokens, expected) }) .reduce( || (0.0, 0, vec![0.0; model.len()]), |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| { ( objs + lobjs, ntokens + lntokens, expected .iter() .zip(lexpected) .map(|(global_el, local_el)| global_el + local_el) .collect(), ) }, ); collected } fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> { if pieces.len() != expected.len() { panic!( "Those two iterators are supposed to be the same length ({} vs {})", pieces.len(), expected.len() ); } let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); let mut sum = 0.0; let expected_frequency_threshold = 0.5; for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() { // Always keep unk. if i == 0 { new_pieces.push((piece.clone(), f64::NAN)); continue; } if *freq < expected_frequency_threshold { continue; } new_pieces.push((piece.clone(), *freq)); sum += freq; } // // Here we do not use the original EM, but use the // // Bayesianified/DPified EM algorithm. // // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf // // This modification will act as a sparse prior. let logsum = digamma(sum); let new_pieces: Vec<_> = new_pieces .into_iter() .map(|(s, c)| (s, digamma(c) - logsum)) .collect(); new_pieces } pub fn do_train( &self, sentences: Vec<Sentence>, model: &mut Unigram, ) -> Result<Vec<AddedToken>> { let progress = self.setup_progress(); // // 1. Compute frequent substrings // TODO Should be able to upgrade to u64 when needed self.update_progress(&progress, sentences.len(), "Suffix array seeds"); let mut pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); // We use a UNK token when training, whatever the `self.unk_token` pieces.push(("<UNK>".into(), f64::NAN)); pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress)); self.finalize_progress(&progress, sentences.len()); // Useful to check compatibility with spm. debug!( "Using {} pieces on {} sentences for EM training", pieces.len(), sentences.len() ); let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 // 2. Run E-M Loops to fine grain the pieces. // We will shrink the vocab by shrinking_factor every loop on average // Some other pieces are dropped if logprob is too small // V = N * (f)**k // k = log(V / N) / log(f) let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln()) / self.shrinking_factor.ln()) as usize + 1; let expected_updates = expected_loops * self.n_sub_iterations as usize; self.update_progress(&progress, expected_updates, "EM training"); let required_chars = self.required_chars(&sentences); if required_chars.len() as u32 > self.vocab_size { return Err(Box::new(UnigramTrainerError::VocabularyTooSmall)); } let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?; loop { // Sub-EM iteration. for _iter in 0..self.n_sub_iterations { // Executes E step let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences); // Executes M step. pieces = self.run_m_step(&pieces, &expected); new_model = Unigram::from(pieces.clone(), Some(0), false)?; // Useful comment for checking compatibility with spm debug!( "Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}", _iter, new_model.len(), _objective, _num_tokens, _num_tokens as f64 / model.len() as f64 ); if let Some(p) = &progress { p.inc(1); } } // end of Sub EM iteration // Stops the iteration when the size of sentences reaches to the // desired symbol size. if pieces.len() <= desired_vocab_size { break; } // Prunes pieces. pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences); new_model = Unigram::from(pieces.clone(), Some(0), false)?; } self.finalize_progress(&progress, expected_updates); // Finally, adjusts the size of sentencepices to be |vocab_size|. *model = self.finalize(new_model, required_chars)?; Ok(self.special_tokens.clone()) } } impl Trainer for UnigramTrainer { type Model = Unigram; /// Train a Unigram model fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> { let sentences: Vec<_> = self.words.iter().map(|(s, i)| (s.to_owned(), *i)).collect(); self.do_train(sentences, model) } /// Whether we should show progress fn should_show_progress(&self) -> bool { self.show_progress } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { let words: Result<AHashMap<String, u32>> = iterator .maybe_par_bridge() .map(|sequence| { let words = process(sequence.as_ref())?; let mut map = AHashMap::new(); for word in words { *map.entry(word).or_default() += 1; } Ok(map) }) .reduce( || Ok(AHashMap::new()), |acc, ws| { let mut acc = acc?; for (k, v) in ws? { *acc.entry(k).or_default() += v; } Ok(acc) }, ); self.words = words?; Ok(()) } } #[cfg(test)] mod tests { use super::*; use assert_approx_eq::assert_approx_eq; use std::iter::FromIterator; #[test] fn test_unigram_chars() { let trainer = UnigramTrainerBuilder::default() .show_progress(false) .build() .unwrap(); let sentences = vec![ ("This is a".to_string(), 1), ("こんにちは友達".to_string(), 1), ]; let required_chars = trainer.required_chars(&sentences); assert_eq!(required_chars.len(), 13); let progress = None; let table = trainer.make_seed_sentence_pieces(&sentences, &progress); let target_strings = vec![ "s", "i", " ", "達", "友", "ん", "は", "に", "ち", "こ", "h", "a", "T", "is ", "s ", ]; let strings: Vec<_> = table.iter().map(|(string, _)| string).collect(); assert_eq!(strings, target_strings); let scores = table.iter().map(|(_, score)| score); let target_scores = vec![ -2.5649493574615367, // 2.0 -2.5649493574615367, // 2.0 -2.5649493574615367, // 2.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -3.258096538021482, // 1.0 -1.4663370687934272, // 6.0 -1.8718021769015916, // 4.0 ]; for (score, target_score) in scores.zip(target_scores) { assert_approx_eq!(*score, target_score, 0.01); } } #[test] fn test_initial_alphabet() { let trainer = UnigramTrainerBuilder::default() .show_progress(false) .initial_alphabet(AHashSet::from_iter(vec!['a', 'b', 'c', 'd', 'e', 'f'])) .build() .unwrap(); let sentences = vec![("こんにちは友達".to_string(), 1)]; let required_chars = trainer.required_chars(&sentences); assert_eq!( required_chars, vec!["こ", "ん", "に", "ち", "は", "友", "達", "a", "b", "c", "d", "e", "f"] .into_iter() .map(|s| s.to_owned()) .collect::<AHashSet<_>>() ); } #[test] fn test_unk_token() { // 1. Should add `unk_token` as first special token let trainer = UnigramTrainerBuilder::default() .show_progress(false) .special_tokens(vec![ AddedToken::from("[SEP]", true), AddedToken::from("[CLS]", true), ]) .unk_token(Some("[UNK]".into())) .build() .unwrap(); let mut unigram = Unigram::default(); trainer .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram) .unwrap(); let mut pieces = unigram.iter(); assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0))); // 2. Let it where it is let trainer = UnigramTrainerBuilder::default() .show_progress(false) .special_tokens(vec![ AddedToken::from("[SEP]", true), AddedToken::from("[CLS]", true), AddedToken::from("[UNK]", true), ]) .unk_token(Some("[UNK]".into())) .build() .unwrap(); let mut unigram = Unigram::default(); trainer .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram) .unwrap(); let mut pieces = unigram.iter(); assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[UNK]".into(), 0.0))); // 3. Don't put it there if not needed let trainer = UnigramTrainerBuilder::default() .show_progress(false) .build() .unwrap(); let mut unigram = Unigram::default(); trainer .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram) .unwrap(); let mut pieces = unigram.iter(); assert_eq!(pieces.next().unwrap().0, "e".to_string()); } #[test] fn test_special_tokens() { let trainer = UnigramTrainerBuilder::default() .show_progress(false) .special_tokens(vec![ AddedToken::from("[SEP]", true), AddedToken::from("[CLS]", true), ]) .build() .unwrap(); let mut unigram = Unigram::default(); trainer .do_train(vec![("The".into(), 12), ("are".into(), 11)], &mut unigram) .unwrap(); let mut pieces = unigram.iter(); assert_eq!(pieces.next(), Some(&("[SEP]".into(), 0.0))); assert_eq!(pieces.next(), Some(&("[CLS]".into(), 0.0))); } #[test] fn test_to_log_prob() { let mut a = vec![("".to_string(), 1.0), ("".to_string(), 2.0)]; to_log_prob(&mut a); let scores = a.iter().map(|(_, score)| *score).collect::<Vec<_>>(); // ln(1) - ln(3) assert_approx_eq!(scores[0], -1.098, 0.01); // ln(2) - ln(3) assert_approx_eq!(scores[1], -0.405, 0.01); } }
tokenizers/tokenizers/src/models/unigram/trainer.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/trainer.rs", "repo_id": "tokenizers", "token_count": 15668 }
327
use serde::{Deserialize, Serialize}; use crate::normalizers::NormalizerWrapper; use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::macro_rules_attribute; #[derive(Clone, Deserialize, Debug, Serialize)] #[serde(tag = "type")] /// Allows concatenating multiple other Normalizer as a Sequence. /// All the normalizers run in sequence in the given order against the same NormalizedString. pub struct Sequence { normalizers: Vec<NormalizerWrapper>, } impl Sequence { pub fn new(normalizers: Vec<NormalizerWrapper>) -> Self { Self { normalizers } } } impl AsRef<[NormalizerWrapper]> for Sequence { fn as_ref(&self) -> &[NormalizerWrapper] { &self.normalizers } } impl AsMut<[NormalizerWrapper]> for Sequence { fn as_mut(&mut self) -> &mut [NormalizerWrapper] { &mut self.normalizers } } impl IntoIterator for Sequence { type Item = NormalizerWrapper; type IntoIter = std::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { self.normalizers.into_iter() } } impl Normalizer for Sequence { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { for normalizer in &self.normalizers { normalizer.normalize(normalized)?; } Ok(()) } } /// Lowercases the input #[derive(Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct Lowercase; impl Normalizer for Lowercase { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.lowercase(); Ok(()) } }
tokenizers/tokenizers/src/normalizers/utils.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/utils.rs", "repo_id": "tokenizers", "token_count": 591 }
328
pub mod bert; pub mod roberta; pub mod sequence; pub mod template; // Re-export these as processors pub use super::pre_tokenizers::byte_level; use serde::{Deserialize, Serialize}; use crate::pre_tokenizers::byte_level::ByteLevel; use crate::processors::bert::BertProcessing; use crate::processors::roberta::RobertaProcessing; use crate::processors::sequence::Sequence; use crate::processors::template::TemplateProcessing; use crate::{Encoding, PostProcessor, Result}; #[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Eq)] #[serde(untagged)] pub enum PostProcessorWrapper { // Roberta must be before Bert for deserialization (serde does not validate tags) Roberta(RobertaProcessing), Bert(BertProcessing), ByteLevel(ByteLevel), Template(TemplateProcessing), Sequence(Sequence), } impl PostProcessor for PostProcessorWrapper { fn added_tokens(&self, is_pair: bool) -> usize { match self { Self::Bert(bert) => bert.added_tokens(is_pair), Self::ByteLevel(bl) => bl.added_tokens(is_pair), Self::Roberta(roberta) => roberta.added_tokens(is_pair), Self::Template(template) => template.added_tokens(is_pair), Self::Sequence(bl) => bl.added_tokens(is_pair), } } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { match self { Self::Bert(bert) => bert.process_encodings(encodings, add_special_tokens), Self::ByteLevel(bl) => bl.process_encodings(encodings, add_special_tokens), Self::Roberta(roberta) => roberta.process_encodings(encodings, add_special_tokens), Self::Template(template) => template.process_encodings(encodings, add_special_tokens), Self::Sequence(bl) => bl.process_encodings(encodings, add_special_tokens), } } } impl_enum_from!(BertProcessing, PostProcessorWrapper, Bert); impl_enum_from!(ByteLevel, PostProcessorWrapper, ByteLevel); impl_enum_from!(RobertaProcessing, PostProcessorWrapper, Roberta); impl_enum_from!(TemplateProcessing, PostProcessorWrapper, Template); impl_enum_from!(Sequence, PostProcessorWrapper, Sequence); #[cfg(test)] mod tests { use super::*; #[test] fn deserialize_bert_roberta_correctly() { let roberta = RobertaProcessing::default(); let roberta_r = r#"{ "type":"RobertaProcessing", "sep":["</s>",2], "cls":["<s>",0], "trim_offsets":true, "add_prefix_space":true }"# .replace(char::is_whitespace, ""); assert_eq!(serde_json::to_string(&roberta).unwrap(), roberta_r); assert_eq!( serde_json::from_str::<PostProcessorWrapper>(&roberta_r).unwrap(), PostProcessorWrapper::Roberta(roberta) ); let bert = BertProcessing::default(); let bert_r = r#"{"type":"BertProcessing","sep":["[SEP]",102],"cls":["[CLS]",101]}"#; assert_eq!(serde_json::to_string(&bert).unwrap(), bert_r); assert_eq!( serde_json::from_str::<PostProcessorWrapper>(bert_r).unwrap(), PostProcessorWrapper::Bert(bert) ); } #[test] fn post_processor_deserialization_no_type() { let json = r#"{"add_prefix_space": true, "trim_offsets": false, "use_regex": false}"#; let reconstructed = serde_json::from_str::<PostProcessorWrapper>(json); match reconstructed { Err(err) => assert_eq!( err.to_string(), "data did not match any variant of untagged enum PostProcessorWrapper" ), _ => panic!("Expected an error here"), } let json = r#"{"sep":["[SEP]",102],"cls":["[CLS]",101]}"#; let reconstructed = serde_json::from_str::<PostProcessorWrapper>(json); assert!(matches!( reconstructed.unwrap(), PostProcessorWrapper::Bert(_) )); let json = r#"{"sep":["</s>",2], "cls":["<s>",0], "trim_offsets":true, "add_prefix_space":true}"#; let reconstructed = serde_json::from_str::<PostProcessorWrapper>(json); assert!(matches!( reconstructed.unwrap(), PostProcessorWrapper::Roberta(_) )); let json = r#"{"type":"RobertaProcessing", "sep":["</s>",2] }"#; let reconstructed = serde_json::from_str::<PostProcessorWrapper>(json); match reconstructed { Err(err) => assert_eq!( err.to_string(), "data did not match any variant of untagged enum PostProcessorWrapper" ), _ => panic!("Expected an error here"), } } }
tokenizers/tokenizers/src/processors/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/processors/mod.rs", "repo_id": "tokenizers", "token_count": 2147 }
329
use crate::tokenizer::pattern::Pattern; use crate::{Offsets, Result}; use onig::Regex; use std::error::Error; #[derive(Debug)] pub struct SysRegex { regex: Regex, } impl SysRegex { pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> onig::FindMatches<'r, 't> { self.regex.find_iter(inside) } pub fn new( regex_str: &str, ) -> std::result::Result<Self, Box<dyn Error + Send + Sync + 'static>> { Ok(Self { regex: Regex::new(regex_str)?, }) } } impl Pattern for &Regex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for (start, end) in self.find_iter(inside) { if prev != start { splits.push(((prev, start), false)); } splits.push(((start, end), true)); prev = end; } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } }
tokenizers/tokenizers/src/utils/onig.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/onig.rs", "repo_id": "tokenizers", "token_count": 571 }
330
<h3 align="center"> <p>State-of-the-art Machine Learning for the Web</p> </h3> Run 🤗 Transformers directly in your browser, with no need for a server! Transformers.js is designed to be functionally equivalent to Hugging Face's [transformers](https://github.com/huggingface/transformers) python library, meaning you can run the same pretrained models using a very similar API. These models support common tasks in different modalities, such as: - 📝 **Natural Language Processing**: text classification, named entity recognition, question answering, language modeling, summarization, translation, multiple choice, and text generation. - 🖼️ **Computer Vision**: image classification, object detection, segmentation, and depth estimation. - 🗣️ **Audio**: automatic speech recognition, audio classification, and text-to-speech. - 🐙 **Multimodal**: embeddings, zero-shot audio classification, zero-shot image classification, and zero-shot object detection. Transformers.js uses [ONNX Runtime](https://onnxruntime.ai/) to run models in the browser. The best part about it, is that you can easily [convert](#convert-your-models-to-onnx) your pretrained PyTorch, TensorFlow, or JAX models to ONNX using [🤗 Optimum](https://github.com/huggingface/optimum#onnx--onnx-runtime). For more information, check out the full [documentation](https://huggingface.co/docs/transformers.js).
transformers.js/docs/snippets/0_introduction.snippet/0
{ "file_path": "transformers.js/docs/snippets/0_introduction.snippet", "repo_id": "transformers.js", "token_count": 382 }
331
# The `pipeline` API Just like the [transformers Python library](https://github.com/huggingface/transformers), Transformers.js provides users with a simple way to leverage the power of transformers. The `pipeline()` function is the easiest and fastest way to use a pretrained model for inference. <Tip> For the full list of available tasks/pipelines, check out [this table](#available-tasks). </Tip> ## The basics Start by creating an instance of `pipeline()` and specifying a task you want to use it for. For example, to create a sentiment analysis pipeline, you can do: ```javascript import { pipeline } from '@huggingface/transformers'; const classifier = await pipeline('sentiment-analysis'); ``` When running for the first time, the `pipeline` will download and cache the default pretrained model associated with the task. This can take a while, but subsequent calls will be much faster. <Tip> By default, models will be downloaded from the [Hugging Face Hub](https://huggingface.co/models) and stored in [browser cache](https://developer.mozilla.org/en-US/docs/Web/API/Cache), but there are ways to specify custom models and cache locations. For more information see [here](./custom_usage). </Tip> You can now use the classifier on your target text by calling it as a function: ```javascript const result = await classifier('I love transformers!'); // [{'label': 'POSITIVE', 'score': 0.9998}] ``` If you have multiple inputs, you can pass them as an array: ```javascript const result = await classifier(['I love transformers!', 'I hate transformers!']); // [{'label': 'POSITIVE', 'score': 0.9998}, {'label': 'NEGATIVE', 'score': 0.9982}] ``` You can also specify a different model to use for the pipeline by passing it as the second argument to the `pipeline()` function. For example, to use a different model for sentiment analysis (like one trained to predict sentiment of a review as a number of stars between 1 and 5), you can do: <!-- TODO: REPLACE 'nlptown/bert-base-multilingual-uncased-sentiment' with 'nlptown/bert-base-multilingual-uncased-sentiment'--> ```javascript const reviewer = await pipeline('sentiment-analysis', 'Xenova/bert-base-multilingual-uncased-sentiment'); const result = await reviewer('The Shawshank Redemption is a true masterpiece of cinema.'); // [{label: '5 stars', score: 0.8167929649353027}] ``` Transformers.js supports loading any model hosted on the Hugging Face Hub, provided it has ONNX weights (located in a subfolder called `onnx`). For more information on how to convert your PyTorch, TensorFlow, or JAX model to ONNX, see the [conversion section](./custom_usage#convert-your-models-to-onnx). The `pipeline()` function is a great way to quickly use a pretrained model for inference, as it takes care of all the preprocessing and postprocessing for you. For example, if you want to perform Automatic Speech Recognition (ASR) using OpenAI's Whisper model, you can do: <!-- TODO: Replace 'Xenova/whisper-small.en' with 'openai/whisper-small.en' --> ```javascript // Create a pipeline for Automatic Speech Recognition const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small.en'); // Transcribe an audio file, loaded from a URL. const result = await transcriber('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac'); // {text: ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` ## Pipeline options ### Loading We offer a variety of options to control how models are loaded from the Hugging Face Hub (or locally). By default, when running in-browser, a *quantized* version of the model is used, which is smaller and faster, but usually less accurate. To override this behaviour (i.e., use the unquantized model), you can use a custom `PretrainedOptions` object as the third parameter to the `pipeline` function: ```javascript // Create a pipeline for feature extraction, using the full-precision model (fp32) const pipe = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2', { dtype: "fp32", }); ``` Check out the section on [quantization](./guides/dtypes) to learn more. You can also specify which revision of the model to use, by passing a `revision` parameter. Since the Hugging Face Hub uses a git-based versioning system, you can use any valid git revision specifier (e.g., branch name or commit hash). ```javascript const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en', { revision: 'output_attentions', }); ``` For the full list of options, check out the [PretrainedOptions](./api/utils/hub#module_utils/hub..PretrainedOptions) documentation. ### Running Many pipelines have additional options that you can specify. For example, when using a model that does multilingual translation, you can specify the source and target languages like this: <!-- TODO: Replace 'Xenova/nllb-200-distilled-600M' with 'facebook/nllb-200-distilled-600M' --> ```javascript // Create a pipeline for translation const translator = await pipeline('translation', 'Xenova/nllb-200-distilled-600M'); // Translate from English to Greek const result = await translator('I like to walk my dog.', { src_lang: 'eng_Latn', tgt_lang: 'ell_Grek' }); // [ { translation_text: 'Μου αρέσει να περπατάω το σκυλί μου.' } ] // Translate back to English const result2 = await translator(result[0].translation_text, { src_lang: 'ell_Grek', tgt_lang: 'eng_Latn' }); // [ { translation_text: 'I like to walk my dog.' } ] ``` When using models that support auto-regressive generation, you can specify generation parameters like the number of new tokens, sampling methods, temperature, repetition penalty, and much more. For a full list of available parameters, see to the [GenerationConfig](./api/utils/generation#module_utils/generation.GenerationConfig) class. For example, to generate a poem using `LaMini-Flan-T5-783M`, you can do: <!-- TODO: Replace 'Xenova/LaMini-Flan-T5-783M' with 'MBZUAI/LaMini-Flan-T5-783M' --> ```javascript // Create a pipeline for text2text-generation const poet = await pipeline('text2text-generation', 'Xenova/LaMini-Flan-T5-783M'); const result = await poet('Write me a love poem about cheese.', { max_new_tokens: 200, temperature: 0.9, repetition_penalty: 2.0, no_repeat_ngram_size: 3, }); ``` Logging `result[0].generated_text` to the console gives: ``` Cheese, oh cheese! You're the perfect comfort food. Your texture so smooth and creamy you can never get old. With every bite it melts in your mouth like buttery delights that make me feel right at home with this sweet treat of mine. From classic to bold flavor combinations, I love how versatile you are as an ingredient too? Cheddar is my go-to for any occasion or mood; It adds depth and richness without being overpowering its taste buds alone ``` ### Streaming Some pipelines such as `text-generation` or `automatic-speech-recognition` support streaming output. This is achieved using the `TextStreamer` class. For example, when using a chat model like `Qwen2.5-Coder-0.5B-Instruct`, you can specify a callback function that will be called with each generated token text (if unset, new tokens will be printed to the console). ```js import { pipeline, TextStreamer } from "@huggingface/transformers"; // Create a text generation pipeline const generator = await pipeline( "text-generation", "onnx-community/Qwen2.5-Coder-0.5B-Instruct", { dtype: "q4" }, ); // Define the list of messages const messages = [ { role: "system", content: "You are a helpful assistant." }, { role: "user", content: "Write a quick sort algorithm." }, ]; // Create text streamer const streamer = new TextStreamer(generator.tokenizer, { skip_prompt: true, // Optionally, do something with the text (e.g., write to a textbox) // callback_function: (text) => { /* Do something with text */ }, }) // Generate a response const result = await generator(messages, { max_new_tokens: 512, do_sample: false, streamer }); ``` Logging `result[0].generated_text` to the console gives: <details> <summary>Click to view the console output</summary> <pre> Here's a simple implementation of the quick sort algorithm in Python: ```python def quick_sort(arr): if len(arr) <= 1: return arr pivot = arr[len(arr) // 2] left = [x for x in arr if x < pivot] middle = [x for x in arr if x == pivot] right = [x for x in arr if x > pivot] return quick_sort(left) + middle + quick_sort(right) # Example usage: arr = [3, 6, 8, 10, 1, 2] sorted_arr = quick_sort(arr) print(sorted_arr) ``` ### Explanation: - **Base Case**: If the array has less than or equal to one element (i.e., `len(arr)` is less than or equal to `1`), it is already sorted and can be returned as is. - **Pivot Selection**: The pivot is chosen as the middle element of the array. - **Partitioning**: The array is partitioned into three parts: elements less than the pivot (`left`), elements equal to the pivot (`middle`), and elements greater than the pivot (`right`). These partitions are then recursively sorted. - **Recursive Sorting**: The subarrays are sorted recursively using `quick_sort`. This approach ensures that each recursive call reduces the problem size by half until it reaches a base case. </pre> </details> This streaming feature allows you to process the output as it is generated, rather than waiting for the entire output to be generated before processing it. For more information on the available options for each pipeline, refer to the [API Reference](./api/pipelines). If you would like more control over the inference process, you can use the [`AutoModel`](./api/models), [`AutoTokenizer`](./api/tokenizers), or [`AutoProcessor`](./api/processors) classes instead. ## Available tasks <include> { "path": "../snippets/5_supported-tasks.snippet" } </include>
transformers.js/docs/source/pipelines.md/0
{ "file_path": "transformers.js/docs/source/pipelines.md", "repo_id": "transformers.js", "token_count": 2944 }
332
import { pipeline, env } from '@xenova/transformers'; env.allowLocalModels = false; /** * This class uses the Singleton pattern to ensure that only one instance of the pipeline is loaded. */ class CodeCompletionPipeline { static task = 'text-generation'; static model = null; static instance = null; static async getInstance(progress_callback = null) { if (this.instance === null) { this.instance = pipeline(this.task, this.model, { progress_callback }); } return this.instance; } } // Listen for messages from the main thread self.addEventListener('message', async (event) => { const { model, text, max_new_tokens, // Generation parameters temperature, top_k, do_sample, } = event.data; if (CodeCompletionPipeline.model !== model) { // Invalidate model if different CodeCompletionPipeline.model = model; if (CodeCompletionPipeline.instance !== null) { (await CodeCompletionPipeline.getInstance()).dispose(); CodeCompletionPipeline.instance = null; } } // Retrieve the code-completion pipeline. When called for the first time, // this will load the pipeline and save it for future use. let generator = await CodeCompletionPipeline.getInstance(x => { // We also add a progress callback to the pipeline so that we can // track model loading. self.postMessage(x); }); // Actually perform the code-completion let output = await generator(text, { max_new_tokens, temperature, top_k, do_sample, // Allows for partial output callback_function: x => { self.postMessage({ status: 'update', output: generator.tokenizer.decode(x[0].output_token_ids, { skip_special_tokens: true }) }); } }); // Send the output back to the main thread self.postMessage({ status: 'complete', output: output, }); });
transformers.js/examples/code-completion/src/worker.js/0
{ "file_path": "transformers.js/examples/code-completion/src/worker.js", "repo_id": "transformers.js", "token_count": 817 }
333
module.exports = { packagerConfig: {}, rebuildConfig: {}, makers: [ { name: '@electron-forge/maker-squirrel', config: {}, }, { name: '@electron-forge/maker-zip', platforms: ['darwin'], }, { name: '@electron-forge/maker-deb', config: {}, }, { name: '@electron-forge/maker-rpm', config: {}, }, ], };
transformers.js/examples/electron/forge.config.js/0
{ "file_path": "transformers.js/examples/electron/forge.config.js", "repo_id": "transformers.js", "token_count": 192 }
334
// content.js - the content scripts which is run in the context of web pages, and has access // to the DOM and other web APIs. // Example usage: // const message = { // action: 'classify', // text: 'text to classify', // } // chrome.runtime.sendMessage(message, (response) => { // console.log('received user data', response) // });
transformers.js/examples/extension/src/content.js/0
{ "file_path": "transformers.js/examples/extension/src/content.js", "repo_id": "transformers.js", "token_count": 107 }
335
import { Florence2ForConditionalGeneration, AutoProcessor, AutoTokenizer, RawImage, full, } from '@xenova/transformers'; async function hasFp16() { try { const adapter = await navigator.gpu.requestAdapter(); return adapter.features.has('shader-f16'); } catch (e) { return false; } } /** * This class uses the Singleton pattern to ensure that only one instance of the model is loaded. */ class Florence2Singleton { static model_id = 'onnx-community/Florence-2-base-ft'; static async getInstance(progress_callback = null) { this.processor ??= AutoProcessor.from_pretrained(this.model_id); this.tokenizer ??= AutoTokenizer.from_pretrained(this.model_id); this.supports_fp16 ??= await hasFp16(); this.model ??= Florence2ForConditionalGeneration.from_pretrained(this.model_id, { dtype: { embed_tokens: this.supports_fp16 ? 'fp16' : 'fp32', vision_encoder: this.supports_fp16 ? 'fp16' : 'fp32', encoder_model: 'q4', // or 'fp16' or 'fp32' decoder_model_merged: 'q4', // or 'fp16' or 'fp32' }, device: 'webgpu', progress_callback, }); return Promise.all([this.model, this.tokenizer, this.processor]); } } async function load() { self.postMessage({ status: 'loading', data: 'Loading model...' }); // Load the pipeline and save it for future use. const [model, tokenizer, processor] = await Florence2Singleton.getInstance(x => { // We also add a progress callback to the pipeline so that we can // track model loading. self.postMessage(x); }); self.postMessage({ status: 'loading', data: 'Compiling shaders and warming up model...' }); // Dummy text and vision inputs const text_inputs = tokenizer('a'); const pixel_values = full([1, 3, 768, 768], 0.0); // Run model with dummy input to compile shaders await model.generate({ ...text_inputs, pixel_values, max_new_tokens: 1, }); self.postMessage({ status: 'ready' }); } const TASKS_WITH_INPUTS = [ '<CAPTION_TO_PHRASE_GROUNDING>', ] let vision_inputs; let image_size; async function run({ text, url, task }) { const [model, tokenizer, processor] = await Florence2Singleton.getInstance(); // Read and preprocess image const start = performance.now(); if (!vision_inputs) { // Cache vision inputs when possible const image = await RawImage.fromURL(url); image_size = image.size; vision_inputs = await processor(image); } let user_input = task; if (TASKS_WITH_INPUTS.includes(task) && text) { user_input += text; } const prompts = processor.construct_prompts(user_input); const text_inputs = tokenizer(prompts); // Generate text const generated_ids = await model.generate({ ...text_inputs, ...vision_inputs, max_new_tokens: 128, num_beams: 1, do_sample: false, }); // Decode generated text const generated_text = tokenizer.batch_decode(generated_ids, { skip_special_tokens: false })[0]; // Post-process the generated text const result = processor.post_process_generation(generated_text, task, image_size); const end = performance.now(); self.postMessage({ status: 'complete', result, time: end - start }); } // Listen for messages from the main thread self.addEventListener('message', async (e) => { const { type, data } = e.data; switch (type) { case 'load': load(); break; case 'run': run(data); break; case 'reset': vision_inputs = image_size = null; break; } });
transformers.js/examples/florence2-webgpu/src/worker.js/0
{ "file_path": "transformers.js/examples/florence2-webgpu/src/worker.js", "repo_id": "transformers.js", "token_count": 1609 }
336
'use client' import { useState } from 'react' export default function Home() { // Keep track of the classification result and the model loading status. const [result, setResult] = useState(null); const [ready, setReady] = useState(null); const classify = async (text) => { if (!text) return; if (ready === null) setReady(false); // Make a request to the /classify route on the server. const result = await fetch(`/classify?text=${encodeURIComponent(text)}`); // If this is the first time we've made a request, set the ready flag. if (!ready) setReady(true); const json = await result.json(); setResult(json); }; return ( <main className="flex min-h-screen flex-col items-center justify-center p-12"> <h1 className="text-5xl font-bold mb-2 text-center">Transformers.js</h1> <h2 className="text-2xl mb-4 text-center">Next.js template (server-side)</h2> <input type="text" className="w-full max-w-xs p-2 border border-gray-300 rounded mb-4" placeholder="Enter text here" onInput={e => { classify(e.target.value); }} /> {ready !== null && ( <pre className="bg-gray-100 p-2 rounded"> { (!ready || !result) ? 'Loading...' : JSON.stringify(result, null, 2)} </pre> )} </main> ) }
transformers.js/examples/next-server/src/app/page.js/0
{ "file_path": "transformers.js/examples/next-server/src/app/page.js", "repo_id": "transformers.js", "token_count": 545 }
337
:root { font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; line-height: 1.5; font-weight: 400; color: #213547; background-color: #ffffff; font-synthesis: none; text-rendering: optimizeLegibility; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; -webkit-text-size-adjust: 100%; } body { margin: 0; display: flex; place-items: center; min-width: 320px; min-height: 100vh; } h1 { font-size: 3.2em; line-height: 1; } h1, h2 { margin: 8px; } select { padding: 0.3em; cursor: pointer; } textarea { padding: 0.6em; } button { padding: 0.6em 1.2em; cursor: pointer; font-weight: 500; } button[disabled] { cursor: not-allowed; } select, textarea, button { border-radius: 8px; border: 1px solid transparent; font-size: 1em; font-family: inherit; background-color: #f9f9f9; transition: border-color 0.25s; } select:hover, textarea:hover, button:not([disabled]):hover { border-color: #646cff; } select:focus, select:focus-visible, textarea:focus, textarea:focus-visible, button:focus, button:focus-visible { outline: 4px auto -webkit-focus-ring-color; }
transformers.js/examples/react-translator/src/index.css/0
{ "file_path": "transformers.js/examples/react-translator/src/index.css", "repo_id": "transformers.js", "token_count": 480 }
338
import { defineConfig } from 'vite'; export default defineConfig(env => { const config = { build: { target: 'esnext' } }; // TODO: Add this back when .wasm files are served locally // if (env.mode === 'development') { // // The .wasm files are not correctly served using Vite in development mode. // // This is a workaround to exclude the onnxruntime-web package from Vite's optimization. // // See also: https://github.com/vitejs/vite/issues/8427 // config.optimizeDeps = { exclude: ["onnxruntime-web"] }; // } return config; });
transformers.js/examples/segment-anything-client/vite.config.js/0
{ "file_path": "transformers.js/examples/segment-anything-client/vite.config.js", "repo_id": "transformers.js", "token_count": 192 }
339
@tailwind base; @tailwind components; @tailwind utilities; :root { font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; line-height: 1.5; font-weight: 400; color: #213547; background-color: #ffffff; font-synthesis: none; text-rendering: optimizeLegibility; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; -webkit-text-size-adjust: 100%; } audio::-webkit-media-controls-panel { background-color: white; }
transformers.js/examples/text-to-speech-client/src/index.css/0
{ "file_path": "transformers.js/examples/text-to-speech-client/src/index.css", "repo_id": "transformers.js", "token_count": 181 }
340
@tailwind base; @tailwind components; @tailwind utilities; :root { font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; line-height: 1.5; font-weight: 400; color-scheme: light dark; color: rgba(255, 255, 255, 0.87); background-color: #242424; font-synthesis: none; text-rendering: optimizeLegibility; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; -webkit-text-size-adjust: 100%; } body { margin: 0; display: flex; place-items: center; min-height: 100vh; } @media (prefers-color-scheme: light) { :root { color: #213547; background-color: #ffffff; } a:hover { color: #747bff; } button { background-color: #f9f9f9; } }
transformers.js/examples/tokenizer-playground/src/index.css/0
{ "file_path": "transformers.js/examples/tokenizer-playground/src/index.css", "repo_id": "transformers.js", "token_count": 306 }
341
import { env, AutoTokenizer, Moondream1ForConditionalGeneration, TextStreamer, StoppingCriteria, RawImage, AutoProcessor, Tensor, full, } from '@xenova/transformers'; const DEVICE = 'webgpu'; const MAX_NEW_TOKENS = 256; env.backends.onnx.wasm.proxy = DEVICE !== 'webgpu'; async function hasFp16() { try { const adapter = await navigator.gpu.requestAdapter(); return adapter.features.has('shader-f16'); } catch (e) { return false; } } /** * This class uses the Singleton pattern to ensure that only one instance of the model is loaded. */ class TextGenerationPipeline { static model_id = 'Xenova/moondream2'; static tokenizer = null; static processor = null; static model = null; static supportsFp16 = null; static async getInstance(progress_callback = null) { this.tokenizer ??= AutoTokenizer.from_pretrained(this.model_id, { progress_callback, }); this.processor ??= AutoProcessor.from_pretrained(this.model_id); // Choose the model based on whether fp16 is available this.supportsFp16 ??= await hasFp16(); this.model ??= Moondream1ForConditionalGeneration.from_pretrained(this.model_id, { dtype: { embed_tokens: this.supportsFp16 ? 'fp16' : 'fp32', // or 'fp32' vision_encoder: this.supportsFp16 ? 'fp16' : 'fp32', // or 'q8' decoder_model_merged: 'q4', // or 'q4f16' or 'q8' }, device: DEVICE, progress_callback, }); return Promise.all([this.tokenizer, this.processor, this.model]); } } class CallbackTextStreamer extends TextStreamer { constructor(tokenizer, cb) { super(tokenizer, { skip_prompt: true, skip_special_tokens: true, }); this.cb = cb; } on_finalized_text(text) { this.cb(text); } } class InterruptableStoppingCriteria extends StoppingCriteria { constructor() { super(); this.interrupted = false; } interrupt() { this.interrupted = true; } reset() { this.interrupted = false; } _call(input_ids, scores) { return new Array(input_ids.length).fill(this.interrupted); } } const stopping_criteria = new InterruptableStoppingCriteria(); async function generate(messages) { // Only support a single image for now const images = messages.filter(x => x.image).map(x => x.image); if (images.length > 1) { self.postMessage({ status: 'error', error: 'Currently, at most one image is supported.', }); return; } // Retrieve the text-generation pipeline. const [tokenizer, processor, model] = await TextGenerationPipeline.getInstance(); // Construct and tokenize prompt const prompt = messages.map(x => `${x.image ? '<image>\n\n' : ''}${x.role === 'user' ? 'Question: ' : 'Answer: '}${x.content.trim()}`).join('\n\n') + '\n\nAnswer:' let inputs = tokenizer(prompt); if (images.length > 0) { const image = await RawImage.fromURL(images[0]); const vision_inputs = await processor(image); inputs = { ...inputs, ...vision_inputs }; } let startTime; let numTokens = 0; const cb = (output) => { startTime ??= performance.now(); let tps; if (numTokens++ > 0) { tps = numTokens / (performance.now() - startTime) * 1000; } self.postMessage({ status: 'update', output, tps, numTokens, }); } const streamer = new CallbackTextStreamer(tokenizer, cb); // Tell the main thread we are starting self.postMessage({ status: 'start' }); const outputs = await model.generate({ ...inputs, max_new_tokens: MAX_NEW_TOKENS, streamer, stopping_criteria, }); const outputText = tokenizer.batch_decode(outputs, { skip_special_tokens: false }); // Send the output back to the main thread self.postMessage({ status: 'complete', output: outputText, }); } async function load() { self.postMessage({ status: 'loading', data: 'Loading model...' }); // Load the pipeline and save it for future use. const [tokenizer, processor, model] = await TextGenerationPipeline.getInstance(x => { // We also add a progress callback to the pipeline so that we can // track model loading. self.postMessage(x); }); self.postMessage({ status: 'loading', data: 'Compiling shaders and warming up model...' }); // Run model with dummy input to compile shaders const text_inputs = tokenizer('a'); const vision_inputs = { pixel_values: full([1, 3, 378, 378], 0.0) } const inputs = { ...text_inputs, ...vision_inputs }; await model.generate({ ...inputs, max_new_tokens: 1 }); self.postMessage({ status: 'ready' }); } // Listen for messages from the main thread self.addEventListener('message', async (e) => { const { type, data } = e.data; switch (type) { case 'load': load(); break; case 'generate': stopping_criteria.reset(); generate(data); break; case 'interrupt': stopping_criteria.interrupt(); break; case 'reset': stopping_criteria.reset(); break; } });
transformers.js/examples/webgpu-vlm/src/worker.js/0
{ "file_path": "transformers.js/examples/webgpu-vlm/src/worker.js", "repo_id": "transformers.js", "token_count": 2356 }
342
from enum import Enum from tqdm import tqdm from typing import Set, List, Optional import onnx import os from dataclasses import dataclass, field from transformers import HfArgumentParser from onnxruntime.quantization import QuantType, QuantizationMode from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer from onnxruntime.quantization.registry import IntegerOpsRegistry from onnxruntime.quantization.matmul_4bits_quantizer import MatMul4BitsQuantizer from onnxruntime.quantization.matmul_bnb4_quantizer import MatMulBnb4Quantizer from . import float16 from .utils import check_and_save_model class QuantMode(Enum): # F32 = 'fp32' FP16 = "fp16" Q8 = "q8" QI8 = "int8" QU8 = "uint8" Q4 = "q4" Q4F16 = "q4f16" BNB4 = "bnb4" QUANTIZE_SUFFIX_MAPPING = { QuantMode.Q8: "quantized", } QUANTIZE_OPTIONS = tuple(x.value for x in QuantMode) # A list of operators that, when detected in a model, should select QUInt8 as the weight type for 8-bit quantization. QUINT8_OPS = ( # NOTE: # As of 2024/11/29, the latest version of onnxruntime-web is 1.20.1, and does not support INT8 weights for Conv layers. # If you attempt to run a model with INT8 weights for Conv layers, you will get an error like: # `Can't create a session. ERROR_CODE: 9, ERROR_MESSAGE: Could not find an implementation for ConvInteger(10) node with name '/.../Conv_quant'` # # For this reason, we choose model weight types to ensure compatibility with onnxruntime-web. # # As per docs, signed weight type (QInt8) is faster on most CPUs, so, we use that unless the model contains a Conv layer. # For more information, see: # - https://github.com/microsoft/onnxruntime/issues/3130#issuecomment-1105200621 # - https://github.com/microsoft/onnxruntime/issues/2339 "Conv", # Models produced by onnxruntime-genai contain optimized operators that perform better with QUInt8 weights. "GroupQueryAttention", "MultiHeadAttention", # TODO: "SimplifiedLayerNormalization", "SkipSimplifiedLayerNormalization" ) @dataclass class IOArguments: """ Arguments to specify input and output folders """ input_folder: str = field( metadata={ "help": "Path of the input folder containing the .onnx models to quantize" } ) output_folder: str = field( metadata={ "help": "Path of the output folder where the quantized .onnx models will be saved" } ) @dataclass class QuantizationArguments: """ Arguments for quantizing ONNX models """ modes: QuantMode = field( default=QUANTIZE_OPTIONS, metadata={ "help": "Quantization mode to use.", "choices": QUANTIZE_OPTIONS, "nargs": "+", }, ) # 8-bit quantization per_channel: bool = field( default=None, metadata={"help": "Whether to quantize weights per channel"} ) reduce_range: bool = field( default=None, metadata={ "help": "Whether to quantize weights with 7-bits. It may improve the accuracy for some models running on non-VNNI machine, especially for per-channel mode" }, ) # 4-bit quantization block_size: int = field( default=None, metadata={ "help": "Block size for blockwise quantization. Note: bnb.nn.Linear4bit only uses block_size=64" }, ) # MatMul4BitsQuantizer is_symmetric: bool = field( default=True, metadata={"help": "Indicate whether to quantize the model symmetrically"}, ) accuracy_level: int = field( default=None, metadata={ "help": "Accuracy level of the 4-bit quantized MatMul computation. " "Refer to the MatMulNBits contrib op's 'accuracy_level' attribute for details " "(https://github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md#commicrosoftmatmulnbits)." }, ) # MatMulBnb4Quantizer quant_type: int = field( default=MatMulBnb4Quantizer.NF4, metadata={ "help": "Quantization data type. 0: FP4, 1: NF4", "choices": [MatMulBnb4Quantizer.FP4, MatMulBnb4Quantizer.NF4], }, ) op_block_list: List[str] = field( default=None, metadata={ "help": "List of operators to exclude from quantization." "Can be any standard ONNX operator (see https://onnx.ai/onnx/operators/)" "or your custom implemented operators.", "nargs": "+", }, ) def get_operators(model: onnx.ModelProto) -> Set[str]: operators = set() def traverse_graph(graph): for node in graph.node: operators.add(node.op_type) for attr in node.attribute: if attr.type == onnx.AttributeProto.GRAPH: traverse_graph(attr.g) traverse_graph(model.graph) return operators def quantize_q8( model: onnx.ModelProto, save_path: str, per_channel: bool, reduce_range: bool, weight_type: QuantType, op_block_list: Optional[List[str]] ): """ Quantize the weights of the model from float32 to int8/uint8 Uses unsigned ints for activation values, signed ints for weights, per https://onnxruntime.ai/docs/performance/quantization.html#data-type-selection it is faster on most CPU architectures """ op_types_to_quantize = set(IntegerOpsRegistry.keys()) if op_block_list is not None: op_types_to_quantize.difference_update(op_block_list) quantizer = ONNXQuantizer( model, per_channel, reduce_range, mode=QuantizationMode.IntegerOps, static=False, weight_qType=weight_type, activation_qType=QuantType.QUInt8, # dynamic activation only supports uint8 tensors_range=None, nodes_to_quantize=[], nodes_to_exclude=[], op_types_to_quantize=op_types_to_quantize, extra_options=dict( EnableSubgraph=True, MatMulConstBOnly=True, ), ) quantizer.quantize_model() check_and_save_model(quantizer.model.model, save_path) def quantize_fp16( model: onnx.ModelProto, save_path: str, op_block_list: Optional[List[str]] ): """ Quantize the weights of the model from float32 to float16 """ # Check whether we should disable shape infer: # ValueError: Message onnx.ModelProto exceeds maximum protobuf size of 2GB: 2338583841 disable_shape_infer = model.ByteSize() >= onnx.checker.MAXIMUM_PROTOBUF blocked_ops = set(float16.DEFAULT_OP_BLOCK_LIST) if op_block_list is not None: blocked_ops.update(op_block_list) model_fp16 = float16.convert_float_to_float16( model, keep_io_types=True, disable_shape_infer=disable_shape_infer, op_block_list=blocked_ops, ) check_and_save_model(model_fp16, save_path) def quantize_q4( model: onnx.ModelProto, save_path: str | None, block_size: int, is_symmetric: bool, accuracy_level: int, ): """ Quantize the weights of the model from float32 to 4-bit int """ quantizer = MatMul4BitsQuantizer( model=model, block_size=block_size, is_symmetric=is_symmetric, accuracy_level=accuracy_level, ) quantizer.process() if save_path: check_and_save_model(quantizer.model.model, save_path) return quantizer.model.model def quantize_bnb4( model: onnx.ModelProto, save_path: str, block_size: int, quant_type: int, ): """ Quantize the weights of the model from float32 to 4-bit int using MatMulBnb4Quantizer """ quantizer = MatMulBnb4Quantizer( model=model, block_size=block_size, quant_type=quant_type, ) quantizer.process() check_and_save_model(quantizer.model.model, save_path) return quantizer.model.model def quantize(input_folder, output_folder, quantization_args: QuantizationArguments): # (Step 1) Validate the arguments if not quantization_args.modes: raise ValueError("At least one quantization mode must be specified") if not os.path.exists(input_folder): raise ValueError(f"Input folder {input_folder} does not exist") model_names_or_paths = [ os.path.join(input_folder, file) for file in os.listdir(input_folder) if file.endswith(".onnx") ] if not model_names_or_paths: raise ValueError(f"No .onnx models found in {input_folder}") os.makedirs(output_folder, exist_ok=True) # (Step 2) Quantize the models for model_path in (progress_models := tqdm(model_names_or_paths)): progress_models.set_description(f"Processing {model_path}") file_name_without_extension = os.path.splitext(os.path.basename(model_path))[0] for mode in (progress := tqdm(quantization_args.modes)): progress.set_description(f" - Quantizing to {mode}") mode = QuantMode(mode) suffix = QUANTIZE_SUFFIX_MAPPING.get(mode, mode.value) save_path = os.path.join( output_folder, f"{file_name_without_extension}_{suffix}.onnx", ) # NOTE: Unfortunately, we need to reload the model for each quantization mode, # which is memory inefficient. This is because the quantization functions # modify the model in-place, and we need to keep the original model for each mode. model = onnx.load_model(model_path) if mode == QuantMode.FP16: quantize_fp16( model, save_path, quantization_args.op_block_list ) elif mode in (QuantMode.Q4, QuantMode.Q4F16): block_size = quantization_args.block_size or 32 q4_model = quantize_q4( model, save_path=None if mode == QuantMode.Q4F16 else save_path, block_size=block_size, is_symmetric=quantization_args.is_symmetric, accuracy_level=quantization_args.accuracy_level, ) if mode == QuantMode.Q4F16: quantize_fp16( q4_model, save_path, quantization_args.op_block_list, ) elif mode == QuantMode.BNB4: quantize_bnb4( model, save_path, block_size=quantization_args.block_size or 64, quant_type=( quantization_args.quant_type if quantization_args.quant_type is not None else MatMulBnb4Quantizer.NF4 ), ) elif mode in (QuantMode.Q8, QuantMode.QI8, QuantMode.QU8): if mode == QuantMode.Q8: op_types = get_operators(model) weight_type = ( QuantType.QUInt8 if any(x in QUINT8_OPS for x in op_types) else QuantType.QInt8 ) elif mode == QuantMode.QI8: weight_type = QuantType.QInt8 else: # mode == QuantMode.QU8: weight_type = QuantType.QUInt8 quantize_q8( model, save_path, per_channel=quantization_args.per_channel, reduce_range=quantization_args.reduce_range, weight_type=weight_type, op_block_list=quantization_args.op_block_list, ) def main(): parser = HfArgumentParser((IOArguments, QuantizationArguments)) io_args, quantization_args = parser.parse_args_into_dataclasses() input_folder = io_args.input_folder output_folder = io_args.output_folder quantize(input_folder, output_folder, quantization_args) if __name__ == "__main__": main()
transformers.js/scripts/quantize.py/0
{ "file_path": "transformers.js/scripts/quantize.py", "repo_id": "transformers.js", "token_count": 5533 }
343
import { ImageProcessor, } from "../../base/image_processors_utils.js"; export class EfficientNetImageProcessor extends ImageProcessor { constructor(config) { super(config); // @ts-expect-error TS2339 this.include_top = this.config.include_top ?? true; if (this.include_top) { this.image_std = this.image_std.map(x => x * x); } } }
transformers.js/src/models/efficientnet/image_processing_efficientnet.js/0
{ "file_path": "transformers.js/src/models/efficientnet/image_processing_efficientnet.js", "repo_id": "transformers.js", "token_count": 173 }
344
import { Processor } from "../../base/processing_utils.js"; import { AutoImageProcessor } from "../auto/image_processing_auto.js"; import { AutoTokenizer } from "../../tokenizers.js"; export class LlavaProcessor extends Processor { static tokenizer_class = AutoTokenizer static image_processor_class = AutoImageProcessor static uses_processor_config = true; /** * @typedef {import('../../utils/image.js').RawImage} RawImage */ // `images` is required, `text` is optional async _call(/** @type {RawImage|RawImage[]} */ images, text = null, kwargs = {}) { const image_inputs = await this.image_processor(images, kwargs); if (text) { const [height, width] = image_inputs.pixel_values.dims.slice(-2); const {image_token, patch_size, num_additional_image_tokens} = this.config; const num_image_tokens = Math.floor( height / patch_size ) * Math.floor(width / patch_size) + num_additional_image_tokens; text = structuredClone(text); // Avoid modifying the original text input if (!Array.isArray(text)) { text = [text]; } for (let i = 0; i < text.length; ++i) { text[i] = text[i].replace(image_token, image_token.repeat(num_image_tokens)); } } const text_inputs = text ? this.tokenizer(text, kwargs) : {}; return { ...image_inputs, ...text_inputs, } } }
transformers.js/src/models/llava/processing_llava.js/0
{ "file_path": "transformers.js/src/models/llava/processing_llava.js", "repo_id": "transformers.js", "token_count": 677 }
345
import { Processor } from "../../base/processing_utils.js"; import { AutoImageProcessor } from "../auto/image_processing_auto.js"; import { AutoTokenizer } from "../../tokenizers.js"; const IMAGE_TOKEN = "<image>"; function build_string_from_input( prompt, bos_token, image_seq_len, image_token, num_images, ) { return `${image_token.repeat(image_seq_len * num_images)}${bos_token}${prompt}\n` } export class PaliGemmaProcessor extends Processor { static tokenizer_class = AutoTokenizer static image_processor_class = AutoImageProcessor static uses_processor_config = false; /** * @typedef {import('../../utils/image.js').RawImage} RawImage */ // `images` is required, `text` is optional async _call(/** @type {RawImage|RawImage[]} */ images, text = null, kwargs = {}) { if (!text) { console.warn( "You are using PaliGemma without a text prefix. It will perform as a picture-captioning model." ) text = "" } if (!Array.isArray(images)) { images = [images] } if (!Array.isArray(text)) { text = [text] } const bos_token = this.tokenizer.bos_token; // @ts-expect-error TS2339 const image_seq_length = this.image_processor.config.image_seq_length; let input_strings; if (text.some((t) => t.includes(IMAGE_TOKEN))) { input_strings = text.map( sample => { const expanded_sample = sample.replaceAll(IMAGE_TOKEN, IMAGE_TOKEN.repeat(image_seq_length)); const bos_rfind_index = expanded_sample.lastIndexOf(IMAGE_TOKEN); const bos_index = bos_rfind_index === -1 ? 0 : bos_rfind_index + IMAGE_TOKEN.length; return expanded_sample.slice(0, bos_index) + bos_token + expanded_sample.slice(bos_index) + "\n"; } ) } else { console.warn( "You are passing both `text` and `images` to `PaliGemmaProcessor`. The processor expects special " + "image tokens in the text, as many tokens as there are images per each text. It is recommended to " + "add `<image>` tokens in the very beginning of your text. For this call, we will infer how many images " + "each text has and add special tokens." ) input_strings = text.map( sample => build_string_from_input( sample, bos_token, image_seq_length, IMAGE_TOKEN, images.length, ) ) } const text_inputs = this.tokenizer(input_strings, kwargs); const image_inputs = await this.image_processor(images, kwargs); return { ...image_inputs, ...text_inputs, } } }
transformers.js/src/models/paligemma/processing_paligemma.js/0
{ "file_path": "transformers.js/src/models/paligemma/processing_paligemma.js", "repo_id": "transformers.js", "token_count": 1386 }
346
export { Idefics3ImageProcessor as SmolVLMImageProcessor } from "../idefics3/image_processing_idefics3.js";
transformers.js/src/models/smolvlm/image_processing_smolvlm.js/0
{ "file_path": "transformers.js/src/models/smolvlm/image_processing_smolvlm.js", "repo_id": "transformers.js", "token_count": 39 }
347
import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; import { Tensor } from '../../utils/tensor.js'; import { mel_filter_bank, spectrogram, window_function } from '../../utils/audio.js'; import { max } from '../../utils/maths.js'; export class WhisperFeatureExtractor extends FeatureExtractor { constructor(config) { super(config); // Prefer given `mel_filters` from preprocessor_config.json, or calculate them if they don't exist. this.config.mel_filters ??= mel_filter_bank( Math.floor(1 + this.config.n_fft / 2), // num_frequency_bins this.config.feature_size, // num_mel_filters 0.0, // min_frequency 8000.0, // max_frequency this.config.sampling_rate, // sampling_rate "slaney", // norm "slaney", // mel_scale ); this.window = window_function(this.config.n_fft, 'hann'); } /** * Computes the log-Mel spectrogram of the provided audio waveform. * @param {Float32Array|Float64Array} waveform The audio waveform to process. * @returns {Promise<Tensor>} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. */ async _extract_fbank_features(waveform) { const features = await spectrogram( waveform, this.window, // window this.config.n_fft, // frame_length this.config.hop_length, // hop_length { power: 2.0, mel_filters: this.config.mel_filters, log_mel: 'log10', // Custom max_num_frames: Math.min( Math.floor(waveform.length / this.config.hop_length), this.config.nb_max_frames, // 3000 ) } ) const data = features.data; const maxValue = max(/** @type {Float32Array} */(data))[0]; for (let i = 0; i < data.length; ++i) { data[i] = (Math.max(data[i], maxValue - 8.0) + 4.0) / 4.0; } return features; } /** * Asynchronously extracts features from a given audio using the provided configuration. * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. * @returns {Promise<{ input_features: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor. */ async _call(audio, { max_length = null, } = {}) { validate_audio_inputs(audio, 'WhisperFeatureExtractor'); let waveform; const length = max_length ?? this.config.n_samples; if (audio.length > length) { if (audio.length > this.config.n_samples) { console.warn( "Attempting to extract features for audio longer than 30 seconds. " + "If using a pipeline to extract transcript from a long audio clip, " + "remember to specify `chunk_length_s` and/or `stride_length_s`." ); } waveform = audio.slice(0, length); } else { // pad with zeros waveform = new Float32Array(length); waveform.set(audio); } const features = await this._extract_fbank_features(waveform); return { input_features: features.unsqueeze_(0) }; } }
transformers.js/src/models/whisper/feature_extraction_whisper.js/0
{ "file_path": "transformers.js/src/models/whisper/feature_extraction_whisper.js", "repo_id": "transformers.js", "token_count": 1566 }
348
/** * @file Helper module for image processing. * * These functions and classes are only used internally, * meaning an end-user shouldn't need to access anything here. * * @module utils/image */ import { isNullishDimension, saveBlob } from './core.js'; import { getFile } from './hub.js'; import { apis } from '../env.js'; import { Tensor } from './tensor.js'; // Will be empty (or not used) if running in browser or web-worker import sharp from 'sharp'; let createCanvasFunction; let ImageDataClass; let loadImageFunction; const IS_BROWSER_OR_WEBWORKER = apis.IS_BROWSER_ENV || apis.IS_WEBWORKER_ENV; if (IS_BROWSER_OR_WEBWORKER) { // Running in browser or web-worker createCanvasFunction = (/** @type {number} */ width, /** @type {number} */ height) => { if (!self.OffscreenCanvas) { throw new Error('OffscreenCanvas not supported by this browser.'); } return new self.OffscreenCanvas(width, height) }; loadImageFunction = self.createImageBitmap; ImageDataClass = self.ImageData; } else if (sharp) { // Running in Node.js, electron, or other non-browser environment loadImageFunction = async (/**@type {sharp.Sharp}*/img) => { const metadata = await img.metadata(); const rawChannels = metadata.channels; const { data, info } = await img.rotate().raw().toBuffer({ resolveWithObject: true }); const newImage = new RawImage(new Uint8ClampedArray(data), info.width, info.height, info.channels); if (rawChannels !== undefined && rawChannels !== info.channels) { // Make sure the new image has the same number of channels as the input image. // This is necessary for grayscale images. newImage.convert(rawChannels); } return newImage; } } else { throw new Error('Unable to load image processing library.'); } // Defined here: https://github.com/python-pillow/Pillow/blob/a405e8406b83f8bfb8916e93971edc7407b8b1ff/src/libImaging/Imaging.h#L262-L268 const RESAMPLING_MAPPING = { 0: 'nearest', 1: 'lanczos', 2: 'bilinear', 3: 'bicubic', 4: 'box', 5: 'hamming', } /** * Mapping from file extensions to MIME types. */ const CONTENT_TYPE_MAP = new Map([ ['png', 'image/png'], ['jpg', 'image/jpeg'], ['jpeg', 'image/jpeg'], ['gif', 'image/gif'], ]); export class RawImage { /** * Create a new `RawImage` object. * @param {Uint8ClampedArray|Uint8Array} data The pixel data. * @param {number} width The width of the image. * @param {number} height The height of the image. * @param {1|2|3|4} channels The number of channels. */ constructor(data, width, height, channels) { this.data = data; this.width = width; this.height = height; this.channels = channels; } /** * Returns the size of the image (width, height). * @returns {[number, number]} The size of the image (width, height). */ get size() { return [this.width, this.height]; } /** * Helper method for reading an image from a variety of input types. * @param {RawImage|string|URL|Blob|HTMLCanvasElement|OffscreenCanvas} input * @returns The image object. * * **Example:** Read image from a URL. * ```javascript * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg'); * // RawImage { * // "data": Uint8ClampedArray [ 25, 25, 25, 19, 19, 19, ... ], * // "width": 800, * // "height": 533, * // "channels": 3 * // } * ``` */ static async read(input) { if (input instanceof RawImage) { return input; } else if (typeof input === 'string' || input instanceof URL) { return await this.fromURL(input); } else if (input instanceof Blob) { return await this.fromBlob(input); } else if ( (typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement) || (typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas) ) { return this.fromCanvas(input); } else { throw new Error(`Unsupported input type: ${typeof input}`); } } /** * Read an image from a canvas. * @param {HTMLCanvasElement|OffscreenCanvas} canvas The canvas to read the image from. * @returns {RawImage} The image object. */ static fromCanvas(canvas) { if (!IS_BROWSER_OR_WEBWORKER) { throw new Error('fromCanvas() is only supported in browser environments.') } const ctx = /** @type {CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D} */ (canvas.getContext('2d')); const data = ctx.getImageData(0, 0, canvas.width, canvas.height).data; return new RawImage(data, canvas.width, canvas.height, 4); } /** * Read an image from a URL or file path. * @param {string|URL} url The URL or file path to read the image from. * @returns {Promise<RawImage>} The image object. */ static async fromURL(url) { const response = await getFile(url); if (response.status !== 200) { throw new Error(`Unable to read image from "${url}" (${response.status} ${response.statusText})`); } const blob = await response.blob(); return this.fromBlob(blob); } /** * Helper method to create a new Image from a blob. * @param {Blob} blob The blob to read the image from. * @returns {Promise<RawImage>} The image object. */ static async fromBlob(blob) { if (IS_BROWSER_OR_WEBWORKER) { // Running in environment with canvas const img = await loadImageFunction(blob); const ctx = createCanvasFunction(img.width, img.height).getContext('2d'); // Draw image to context ctx.drawImage(img, 0, 0); return new this(ctx.getImageData(0, 0, img.width, img.height).data, img.width, img.height, 4); } else { // Use sharp.js to read (and possible resize) the image. const img = sharp(await blob.arrayBuffer()); return await loadImageFunction(img); } } /** * Helper method to create a new Image from a tensor * @param {Tensor} tensor */ static fromTensor(tensor, channel_format = 'CHW') { if (tensor.dims.length !== 3) { throw new Error(`Tensor should have 3 dimensions, but has ${tensor.dims.length} dimensions.`); } if (channel_format === 'CHW') { tensor = tensor.transpose(1, 2, 0); } else if (channel_format === 'HWC') { // Do nothing } else { throw new Error(`Unsupported channel format: ${channel_format}`); } if (!(tensor.data instanceof Uint8ClampedArray || tensor.data instanceof Uint8Array)) { throw new Error(`Unsupported tensor type: ${tensor.type}`); } switch (tensor.dims[2]) { case 1: case 2: case 3: case 4: return new RawImage(tensor.data, tensor.dims[1], tensor.dims[0], tensor.dims[2]); default: throw new Error(`Unsupported number of channels: ${tensor.dims[2]}`); } } /** * Convert the image to grayscale format. * @returns {RawImage} `this` to support chaining. */ grayscale() { if (this.channels === 1) { return this; } const newData = new Uint8ClampedArray(this.width * this.height * 1); switch (this.channels) { case 3: // rgb to grayscale case 4: // rgba to grayscale for (let i = 0, offset = 0; i < this.data.length; i += this.channels) { const red = this.data[i]; const green = this.data[i + 1]; const blue = this.data[i + 2]; newData[offset++] = Math.round(0.2989 * red + 0.5870 * green + 0.1140 * blue); } break; default: throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`); } return this._update(newData, this.width, this.height, 1); } /** * Convert the image to RGB format. * @returns {RawImage} `this` to support chaining. */ rgb() { if (this.channels === 3) { return this; } const newData = new Uint8ClampedArray(this.width * this.height * 3); switch (this.channels) { case 1: // grayscale to rgb for (let i = 0, offset = 0; i < this.data.length; ++i) { newData[offset++] = this.data[i]; newData[offset++] = this.data[i]; newData[offset++] = this.data[i]; } break; case 4: // rgba to rgb for (let i = 0, offset = 0; i < this.data.length; i += 4) { newData[offset++] = this.data[i]; newData[offset++] = this.data[i + 1]; newData[offset++] = this.data[i + 2]; } break; default: throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`); } return this._update(newData, this.width, this.height, 3); } /** * Convert the image to RGBA format. * @returns {RawImage} `this` to support chaining. */ rgba() { if (this.channels === 4) { return this; } const newData = new Uint8ClampedArray(this.width * this.height * 4); switch (this.channels) { case 1: // grayscale to rgba for (let i = 0, offset = 0; i < this.data.length; ++i) { newData[offset++] = this.data[i]; newData[offset++] = this.data[i]; newData[offset++] = this.data[i]; newData[offset++] = 255; } break; case 3: // rgb to rgba for (let i = 0, offset = 0; i < this.data.length; i += 3) { newData[offset++] = this.data[i]; newData[offset++] = this.data[i + 1]; newData[offset++] = this.data[i + 2]; newData[offset++] = 255; } break; default: throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`); } return this._update(newData, this.width, this.height, 4); } /** * Apply an alpha mask to the image. Operates in place. * @param {RawImage} mask The mask to apply. It should have a single channel. * @returns {RawImage} The masked image. * @throws {Error} If the mask is not the same size as the image. * @throws {Error} If the image does not have 4 channels. * @throws {Error} If the mask is not a single channel. */ putAlpha(mask) { if (mask.width !== this.width || mask.height !== this.height) { throw new Error(`Expected mask size to be ${this.width}x${this.height}, but got ${mask.width}x${mask.height}`); } if (mask.channels !== 1) { throw new Error(`Expected mask to have 1 channel, but got ${mask.channels}`); } const this_data = this.data; const mask_data = mask.data; const num_pixels = this.width * this.height; if (this.channels === 3) { // Convert to RGBA and simultaneously apply mask to alpha channel const newData = new Uint8ClampedArray(num_pixels * 4); for (let i = 0, in_offset = 0, out_offset = 0; i < num_pixels; ++i) { newData[out_offset++] = this_data[in_offset++]; newData[out_offset++] = this_data[in_offset++]; newData[out_offset++] = this_data[in_offset++]; newData[out_offset++] = mask_data[i]; } return this._update(newData, this.width, this.height, 4); } else if (this.channels === 4) { // Apply mask to alpha channel in place for (let i = 0; i < num_pixels; ++i) { this_data[4 * i + 3] = mask_data[i]; } return this; } throw new Error(`Expected image to have 3 or 4 channels, but got ${this.channels}`); } /** * Resize the image to the given dimensions. This method uses the canvas API to perform the resizing. * @param {number} width The width of the new image. `null` or `-1` will preserve the aspect ratio. * @param {number} height The height of the new image. `null` or `-1` will preserve the aspect ratio. * @param {Object} options Additional options for resizing. * @param {0|1|2|3|4|5|string} [options.resample] The resampling method to use. * @returns {Promise<RawImage>} `this` to support chaining. */ async resize(width, height, { resample = 2, } = {}) { // Do nothing if the image already has the desired size if (this.width === width && this.height === height) { return this; } // Ensure resample method is a string let resampleMethod = RESAMPLING_MAPPING[resample] ?? resample; // Calculate width / height to maintain aspect ratio, in the event that // the user passed a null value in. // This allows users to pass in something like `resize(320, null)` to // resize to 320 width, but maintain aspect ratio. const nullish_width = isNullishDimension(width); const nullish_height = isNullishDimension(height); if (nullish_width && nullish_height) { return this; } else if (nullish_width) { width = (height / this.height) * this.width; } else if (nullish_height) { height = (width / this.width) * this.height; } if (IS_BROWSER_OR_WEBWORKER) { // TODO use `resample` in browser environment // Store number of channels before resizing const numChannels = this.channels; // Create canvas object for this image const canvas = this.toCanvas(); // Actually perform resizing using the canvas API const ctx = createCanvasFunction(width, height).getContext('2d'); // Draw image to context, resizing in the process ctx.drawImage(canvas, 0, 0, width, height); // Create image from the resized data const resizedImage = new RawImage(ctx.getImageData(0, 0, width, height).data, width, height, 4); // Convert back so that image has the same number of channels as before return resizedImage.convert(numChannels); } else { // Create sharp image from raw data, and resize let img = this.toSharp(); switch (resampleMethod) { case 'box': case 'hamming': if (resampleMethod === 'box' || resampleMethod === 'hamming') { console.warn(`Resampling method ${resampleMethod} is not yet supported. Using bilinear instead.`); resampleMethod = 'bilinear'; } case 'nearest': case 'bilinear': case 'bicubic': // Perform resizing using affine transform. // This matches how the python Pillow library does it. img = img.affine([width / this.width, 0, 0, height / this.height], { interpolator: resampleMethod }); break; case 'lanczos': // https://github.com/python-pillow/Pillow/discussions/5519 // https://github.com/lovell/sharp/blob/main/docs/api-resize.md img = img.resize({ width, height, fit: 'fill', kernel: 'lanczos3', // PIL Lanczos uses a kernel size of 3 }); break; default: throw new Error(`Resampling method ${resampleMethod} is not supported.`); } return await loadImageFunction(img); } } async pad([left, right, top, bottom]) { left = Math.max(left, 0); right = Math.max(right, 0); top = Math.max(top, 0); bottom = Math.max(bottom, 0); if (left === 0 && right === 0 && top === 0 && bottom === 0) { // No padding needed return this; } if (IS_BROWSER_OR_WEBWORKER) { // Store number of channels before padding const numChannels = this.channels; // Create canvas object for this image const canvas = this.toCanvas(); const newWidth = this.width + left + right; const newHeight = this.height + top + bottom; // Create a new canvas of the desired size. const ctx = createCanvasFunction(newWidth, newHeight).getContext('2d'); // Draw image to context, padding in the process ctx.drawImage(canvas, 0, 0, this.width, this.height, left, top, this.width, this.height ); // Create image from the padded data const paddedImage = new RawImage( ctx.getImageData(0, 0, newWidth, newHeight).data, newWidth, newHeight, 4 ); // Convert back so that image has the same number of channels as before return paddedImage.convert(numChannels); } else { const img = this.toSharp().extend({ left, right, top, bottom }); return await loadImageFunction(img); } } async crop([x_min, y_min, x_max, y_max]) { // Ensure crop bounds are within the image x_min = Math.max(x_min, 0); y_min = Math.max(y_min, 0); x_max = Math.min(x_max, this.width - 1); y_max = Math.min(y_max, this.height - 1); // Do nothing if the crop is the entire image if (x_min === 0 && y_min === 0 && x_max === this.width - 1 && y_max === this.height - 1) { return this; } const crop_width = x_max - x_min + 1; const crop_height = y_max - y_min + 1; if (IS_BROWSER_OR_WEBWORKER) { // Store number of channels before resizing const numChannels = this.channels; // Create canvas object for this image const canvas = this.toCanvas(); // Create a new canvas of the desired size. This is needed since if the // image is too small, we need to pad it with black pixels. const ctx = createCanvasFunction(crop_width, crop_height).getContext('2d'); // Draw image to context, cropping in the process ctx.drawImage(canvas, x_min, y_min, crop_width, crop_height, 0, 0, crop_width, crop_height ); // Create image from the resized data const resizedImage = new RawImage(ctx.getImageData(0, 0, crop_width, crop_height).data, crop_width, crop_height, 4); // Convert back so that image has the same number of channels as before return resizedImage.convert(numChannels); } else { // Create sharp image from raw data const img = this.toSharp().extract({ left: x_min, top: y_min, width: crop_width, height: crop_height, }); return await loadImageFunction(img); } } async center_crop(crop_width, crop_height) { // If the image is already the desired size, return it if (this.width === crop_width && this.height === crop_height) { return this; } // Determine bounds of the image in the new canvas const width_offset = (this.width - crop_width) / 2; const height_offset = (this.height - crop_height) / 2; if (IS_BROWSER_OR_WEBWORKER) { // Store number of channels before resizing const numChannels = this.channels; // Create canvas object for this image const canvas = this.toCanvas(); // Create a new canvas of the desired size. This is needed since if the // image is too small, we need to pad it with black pixels. const ctx = createCanvasFunction(crop_width, crop_height).getContext('2d'); let sourceX = 0; let sourceY = 0; let destX = 0; let destY = 0; if (width_offset >= 0) { sourceX = width_offset; } else { destX = -width_offset; } if (height_offset >= 0) { sourceY = height_offset; } else { destY = -height_offset; } // Draw image to context, cropping in the process ctx.drawImage(canvas, sourceX, sourceY, crop_width, crop_height, destX, destY, crop_width, crop_height ); // Create image from the resized data const resizedImage = new RawImage(ctx.getImageData(0, 0, crop_width, crop_height).data, crop_width, crop_height, 4); // Convert back so that image has the same number of channels as before return resizedImage.convert(numChannels); } else { // Create sharp image from raw data let img = this.toSharp(); if (width_offset >= 0 && height_offset >= 0) { // Cropped image lies entirely within the original image img = img.extract({ left: Math.floor(width_offset), top: Math.floor(height_offset), width: crop_width, height: crop_height, }) } else if (width_offset <= 0 && height_offset <= 0) { // Cropped image lies entirely outside the original image, // so we add padding const top = Math.floor(-height_offset); const left = Math.floor(-width_offset); img = img.extend({ top: top, left: left, // Ensures the resulting image has the desired dimensions right: crop_width - this.width - left, bottom: crop_height - this.height - top, }); } else { // Cropped image lies partially outside the original image. // We first pad, then crop. let y_padding = [0, 0]; let y_extract = 0; if (height_offset < 0) { y_padding[0] = Math.floor(-height_offset); y_padding[1] = crop_height - this.height - y_padding[0]; } else { y_extract = Math.floor(height_offset); } let x_padding = [0, 0]; let x_extract = 0; if (width_offset < 0) { x_padding[0] = Math.floor(-width_offset); x_padding[1] = crop_width - this.width - x_padding[0]; } else { x_extract = Math.floor(width_offset); } img = img.extend({ top: y_padding[0], bottom: y_padding[1], left: x_padding[0], right: x_padding[1], }).extract({ left: x_extract, top: y_extract, width: crop_width, height: crop_height, }) } return await loadImageFunction(img); } } async toBlob(type = 'image/png', quality = 1) { if (!IS_BROWSER_OR_WEBWORKER) { throw new Error('toBlob() is only supported in browser environments.') } const canvas = this.toCanvas(); return await canvas.convertToBlob({ type, quality }); } toTensor(channel_format = 'CHW') { let tensor = new Tensor( 'uint8', new Uint8Array(this.data), [this.height, this.width, this.channels] ); if (channel_format === 'HWC') { // Do nothing } else if (channel_format === 'CHW') { // hwc -> chw tensor = tensor.permute(2, 0, 1); } else { throw new Error(`Unsupported channel format: ${channel_format}`); } return tensor; } toCanvas() { if (!IS_BROWSER_OR_WEBWORKER) { throw new Error('toCanvas() is only supported in browser environments.') } // Clone, and convert data to RGBA before drawing to canvas. // This is because the canvas API only supports RGBA const cloned = this.clone().rgba(); // Create canvas object for the cloned image const clonedCanvas = createCanvasFunction(cloned.width, cloned.height); // Draw image to context const data = new ImageDataClass(cloned.data, cloned.width, cloned.height); clonedCanvas.getContext('2d').putImageData(data, 0, 0); return clonedCanvas; } /** * Split this image into individual bands. This method returns an array of individual image bands from an image. * For example, splitting an "RGB" image creates three new images each containing a copy of one of the original bands (red, green, blue). * * Inspired by PIL's `Image.split()` [function](https://pillow.readthedocs.io/en/latest/reference/Image.html#PIL.Image.Image.split). * @returns {RawImage[]} An array containing bands. */ split() { const { data, width, height, channels } = this; /** @type {typeof Uint8Array | typeof Uint8ClampedArray} */ const data_type = /** @type {any} */(data.constructor); const per_channel_length = data.length / channels; // Pre-allocate buffers for each channel const split_data = Array.from( { length: channels }, () => new data_type(per_channel_length), ); // Write pixel data for (let i = 0; i < per_channel_length; ++i) { const data_offset = channels * i; for (let j = 0; j < channels; ++j) { split_data[j][i] = data[data_offset + j]; } } return split_data.map((data) => new RawImage(data, width, height, 1)); } /** * Helper method to update the image data. * @param {Uint8ClampedArray} data The new image data. * @param {number} width The new width of the image. * @param {number} height The new height of the image. * @param {1|2|3|4|null} [channels] The new number of channels of the image. * @private */ _update(data, width, height, channels = null) { this.data = data; this.width = width; this.height = height; if (channels !== null) { this.channels = channels; } return this; } /** * Clone the image * @returns {RawImage} The cloned image */ clone() { return new RawImage(this.data.slice(), this.width, this.height, this.channels); } /** * Helper method for converting image to have a certain number of channels * @param {number} numChannels The number of channels. Must be 1, 3, or 4. * @returns {RawImage} `this` to support chaining. */ convert(numChannels) { if (this.channels === numChannels) return this; // Already correct number of channels switch (numChannels) { case 1: this.grayscale(); break; case 3: this.rgb(); break; case 4: this.rgba(); break; default: throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`); } return this; } /** * Save the image to the given path. * @param {string} path The path to save the image to. */ async save(path) { if (IS_BROWSER_OR_WEBWORKER) { if (apis.IS_WEBWORKER_ENV) { throw new Error('Unable to save an image from a Web Worker.') } const extension = path.split('.').pop().toLowerCase(); const mime = CONTENT_TYPE_MAP.get(extension) ?? 'image/png'; // Convert image to Blob const blob = await this.toBlob(mime); saveBlob(path, blob) } else if (!apis.IS_FS_AVAILABLE) { throw new Error('Unable to save the image because filesystem is disabled in this environment.') } else { const img = this.toSharp(); return await img.toFile(path); } } toSharp() { if (IS_BROWSER_OR_WEBWORKER) { throw new Error('toSharp() is only supported in server-side environments.') } return sharp(this.data, { raw: { width: this.width, height: this.height, channels: this.channels } }); } } /** * Helper function to load an image from a URL, path, etc. */ export const load_image = RawImage.read.bind(RawImage);
transformers.js/src/utils/image.js/0
{ "file_path": "transformers.js/src/utils/image.js", "repo_id": "transformers.js", "token_count": 13943 }
349
import { DistilBertTokenizer } from "../../../src/tokenizers.js"; import { BASE_TEST_STRINGS, BERT_TEST_STRINGS } from "../test_strings.js"; export const TOKENIZER_CLASS = DistilBertTokenizer; export const TEST_CONFIG = { "Xenova/distilbert-base-cased-distilled-squad": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, tokens: ["How", "are", "you", "doing", "?"], ids: [101, 1731, 1132, 1128, 1833, 136, 102], decoded: "[CLS] How are you doing? [SEP]", }, SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["You", "should", "'", "ve", "done", "this"], ids: [101, 1192, 1431, 112, 1396, 1694, 1142, 102], decoded: "[CLS] You should've done this [SEP]", }, NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["01", "##23", "##45", "##6", "##7", "##8", "##9", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "100", "1000"], ids: [101, 5187, 22737, 21336, 1545, 1559, 1604, 1580, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 1275, 1620, 6087, 102], decoded: "[CLS] 0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000 [SEP]", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["The", "company", "was", "founded", "in", "2016", "."], ids: [101, 1109, 1419, 1108, 1771, 1107, 1446, 119, 102], decoded: "[CLS] The company was founded in 2016. [SEP]", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["A", "'", "ll", "!", "!", "to", "?", "'", "d", "'", "'", "d", "of", ",", "can", "'", "t", "."], ids: [101, 138, 112, 1325, 106, 106, 1106, 136, 112, 173, 112, 112, 173, 1104, 117, 1169, 112, 189, 119, 102], decoded: "[CLS] A'll!! to?'d'' d of, can't. [SEP]", }, PYTHON_CODE: { text: BASE_TEST_STRINGS.PYTHON_CODE, tokens: ["def", "main", "(", ")", ":", "pass"], ids: [101, 19353, 1514, 113, 114, 131, 2789, 102], decoded: "[CLS] def main ( ) : pass [SEP]", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["let", "a", "=", "o", "##b", "##j", ".", "to", "##S", "##tring", "(", ")", ";", "to", "##S", "##tring", "(", ")", ";"], ids: [101, 1519, 170, 134, 184, 1830, 3361, 119, 1106, 1708, 28108, 113, 114, 132, 1106, 1708, 28108, 113, 114, 132, 102], decoded: "[CLS] let a = obj. toString ( ) ; toString ( ) ; [SEP]", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["This", "is", "a", "test", "."], ids: [101, 1188, 1110, 170, 2774, 119, 102], decoded: "[CLS] This is a test. [SEP]", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["UN", "##wan", "##t\u00e9", "##d", ",", "running"], ids: [101, 7414, 5491, 14608, 1181, 117, 1919, 102], decoded: "[CLS] UNwant\u00e9d, running [SEP]", }, CONTROL_TOKENS: { text: BASE_TEST_STRINGS.CONTROL_TOKENS, tokens: ["123"], ids: [101, 13414, 102], decoded: "[CLS] 123 [SEP]", }, HELLO_WORLD_TITLECASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_TITLECASE, tokens: ["Hello", "World"], ids: [101, 8667, 1291, 102], decoded: "[CLS] Hello World [SEP]", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["hello", "world"], ids: [101, 19082, 1362, 102], decoded: "[CLS] hello world [SEP]", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u751f", "[UNK]", "[UNK]", "\u771f", "[UNK]", "[UNK]"], ids: [101, 1056, 100, 100, 1061, 100, 100, 102], decoded: "[CLS] \u751f [UNK] [UNK] \u771f [UNK] [UNK] [SEP]", }, LEADING_SPACE: { text: BASE_TEST_STRINGS.LEADING_SPACE, tokens: ["leading", "space"], ids: [101, 2020, 2000, 102], decoded: "[CLS] leading space [SEP]", }, TRAILING_SPACE: { text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["trailing", "space"], ids: [101, 13161, 2000, 102], decoded: "[CLS] trailing space [SEP]", }, DOUBLE_SPACE: { text: BASE_TEST_STRINGS.DOUBLE_SPACE, tokens: ["Hi", "Hello"], ids: [101, 8790, 8667, 102], decoded: "[CLS] Hi Hello [SEP]", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["test", "$", "1", "R", "##2", "#", "3", "\u20ac", "##4", "\u00a3", "##5", "\u00a5", "##6", "[UNK]", "\u20b9", "##8", "\u20b1", "##9", "test"], ids: [101, 2774, 109, 122, 155, 1477, 108, 124, 836, 1527, 202, 1571, 203, 1545, 100, 838, 1604, 837, 1580, 2774, 102], decoded: "[CLS] test $ 1 R2 # 3 \u20ac4 \u00a35 \u00a56 [UNK] \u20b98 \u20b19 test [SEP]", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "bought", "an", "apple", "for", "$", "1", ".", "00", "at", "the", "store", "."], ids: [101, 146, 3306, 1126, 12075, 1111, 109, 122, 119, 3135, 1120, 1103, 2984, 119, 102], decoded: "[CLS] I bought an apple for $ 1. 00 at the store. [SEP]", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["you", "\u2026"], ids: [101, 1128, 795, 102], decoded: "[CLS] you \u2026 [SEP]", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["you", "\u2026"], ids: [101, 1128, 795, 102], decoded: "[CLS] you \u2026 [SEP]", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["you", "\u2026", "you", "\u2026"], ids: [101, 1128, 795, 1128, 795, 102], decoded: "[CLS] you \u2026 you \u2026 [SEP]", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["weird", "[UNK]", "edge", "[UNK]", "case"], ids: [101, 6994, 100, 2652, 100, 1692, 102], decoded: "[CLS] weird [UNK] edge [UNK] case [SEP]", }, SPIECE_UNDERSCORE: { text: BASE_TEST_STRINGS.SPIECE_UNDERSCORE, tokens: ["[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "."], ids: [101, 100, 100, 100, 100, 100, 119, 102], decoded: "[CLS] [UNK] [UNK] [UNK] [UNK] [UNK]. [SEP]", }, POPULAR_EMOJIS: { text: BASE_TEST_STRINGS.POPULAR_EMOJIS, tokens: ["[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]"], ids: [101, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 102], decoded: "[CLS] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [SEP]", }, MULTIBYTE_EMOJIS: { text: BASE_TEST_STRINGS.MULTIBYTE_EMOJIS, tokens: ["[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]", "[UNK]"], ids: [101, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 102], decoded: "[CLS] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [UNK] [SEP]", }, }, "Xenova/distilbert-base-uncased-finetuned-sst-2-english": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, tokens: ["how", "are", "you", "doing", "?"], ids: [101, 2129, 2024, 2017, 2725, 1029, 102], decoded: "[CLS] how are you doing? [SEP]", }, SIMPLE_WITH_PUNCTUATION: { text: BASE_TEST_STRINGS.SIMPLE_WITH_PUNCTUATION, tokens: ["you", "should", "'", "ve", "done", "this"], ids: [101, 2017, 2323, 1005, 2310, 2589, 2023, 102], decoded: "[CLS] you should've done this [SEP]", }, TEXT_WITH_NUMBERS: { text: BASE_TEST_STRINGS.TEXT_WITH_NUMBERS, tokens: ["the", "company", "was", "founded", "in", "2016", "."], ids: [101, 1996, 2194, 2001, 2631, 1999, 2355, 1012, 102], decoded: "[CLS] the company was founded in 2016. [SEP]", }, PUNCTUATION: { text: BASE_TEST_STRINGS.PUNCTUATION, tokens: ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "'", "'", "d", "of", ",", "can", "'", "t", "."], ids: [101, 1037, 1005, 2222, 999, 999, 2000, 1029, 1005, 1040, 1005, 1005, 1040, 1997, 1010, 2064, 1005, 1056, 1012, 102], decoded: "[CLS] a'll!! to?'d'' d of, can't. [SEP]", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["let", "a", "=", "ob", "##j", ".", "to", "##st", "##ring", "(", ")", ";", "to", "##st", "##ring", "(", ")", ";"], ids: [101, 2292, 1037, 1027, 27885, 3501, 1012, 2000, 3367, 4892, 1006, 1007, 1025, 2000, 3367, 4892, 1006, 1007, 1025, 102], decoded: "[CLS] let a = obj. tostring ( ) ; tostring ( ) ; [SEP]", }, NEWLINES: { text: BASE_TEST_STRINGS.NEWLINES, tokens: ["this", "is", "a", "test", "."], ids: [101, 2023, 2003, 1037, 3231, 1012, 102], decoded: "[CLS] this is a test. [SEP]", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["unwanted", ",", "running"], ids: [101, 18162, 1010, 2770, 102], decoded: "[CLS] unwanted, running [SEP]", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u751f", "[UNK]", "\u7684", "\u771f", "[UNK]", "[UNK]"], ids: [101, 1910, 100, 1916, 1921, 100, 100, 102], decoded: "[CLS] \u751f [UNK] \u7684 \u771f [UNK] [UNK] [SEP]", }, DOUBLE_SPACE: { text: BASE_TEST_STRINGS.DOUBLE_SPACE, tokens: ["hi", "hello"], ids: [101, 7632, 7592, 102], decoded: "[CLS] hi hello [SEP]", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["test", "$", "1", "r", "##2", "#", "3", "\u20ac", "##4", "\u00a35", "\u00a5", "##6", "[UNK]", "\u20b9", "##8", "\u20b1", "##9", "test"], ids: [101, 3231, 1002, 1015, 1054, 2475, 1001, 1017, 1574, 2549, 27813, 1071, 2575, 100, 1576, 2620, 1575, 2683, 3231, 102], decoded: "[CLS] test $ 1 r2 # 3 \u20ac4 \u00a35 \u00a56 [UNK] \u20b98 \u20b19 test [SEP]", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["i", "bought", "an", "apple", "for", "$", "1", ".", "00", "at", "the", "store", "."], ids: [101, 1045, 4149, 2019, 6207, 2005, 1002, 1015, 1012, 4002, 2012, 1996, 3573, 1012, 102], decoded: "[CLS] i bought an apple for $ 1. 00 at the store. [SEP]", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["weird", "\uff5e", "edge", "\uff5e", "case"], ids: [101, 6881, 1995, 3341, 1995, 2553, 102], decoded: "[CLS] weird \uff5e edge \uff5e case [SEP]", }, }, "Xenova/distiluse-base-multilingual-cased-v2": { NUMBERS: { text: BASE_TEST_STRINGS.NUMBERS, tokens: ["012", "##34", "##5", "##6", "##7", "##8", "##9", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "100", "1000"], ids: [101, 69878, 78301, 11166, 11211, 11305, 11396, 11373, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 10150, 10407, 12186, 102], decoded: "[CLS] 0123456789 0 1 2 3 4 5 6 7 8 9 10 100 1000 [SEP]", }, JAVASCRIPT_CODE: { text: BASE_TEST_STRINGS.JAVASCRIPT_CODE, tokens: ["let", "a", "=", "ob", "##j", ".", "to", "##S", "##trin", "##g", "(", ")", ";", "to", "##S", "##trin", "##g", "(", ")", ";"], ids: [101, 13595, 169, 134, 17339, 10418, 119, 10114, 10731, 109163, 10240, 113, 114, 132, 10114, 10731, 109163, 10240, 113, 114, 132, 102], decoded: "[CLS] let a = obj. toString ( ) ; toString ( ) ; [SEP]", }, BASIC: { text: BASE_TEST_STRINGS.BASIC, tokens: ["UN", "##want", "##\u00e9d", ",", "running"], ids: [101, 26578, 104216, 84193, 117, 18020, 102], decoded: "[CLS] UNwant\u00e9d, running [SEP]", }, HELLO_WORLD_LOWERCASE: { text: BASE_TEST_STRINGS.HELLO_WORLD_LOWERCASE, tokens: ["hell", "##o", "world"], ids: [101, 61694, 10133, 11356, 102], decoded: "[CLS] hello world [SEP]", }, CHINESE_ONLY: { text: BASE_TEST_STRINGS.CHINESE_ONLY, tokens: ["\u751f", "\u6d3b", "\u7684", "\u771f", "\u8c1b", "\u662f"], ids: [101, 5600, 4978, 5718, 5769, 7378, 4380, 102], decoded: "[CLS] \u751f \u6d3b \u7684 \u771f \u8c1b \u662f [SEP]", }, TRAILING_SPACE: { text: BASE_TEST_STRINGS.TRAILING_SPACE, tokens: ["trail", "##ing", "space"], ids: [101, 56559, 10230, 16199, 102], decoded: "[CLS] trailing space [SEP]", }, CURRENCY: { text: BASE_TEST_STRINGS.CURRENCY, tokens: ["test", "$", "1", "R2", "#", "3", "\u20ac", "##4", "\u00a3", "##5", "\u00a5", "##6", "[UNK]", "\u20b9", "##8", "[UNK]", "test"], ids: [101, 15839, 109, 122, 94000, 108, 124, 1775, 11011, 201, 11166, 202, 11211, 100, 1776, 11396, 100, 15839, 102], decoded: "[CLS] test $ 1 R2 # 3 \u20ac4 \u00a35 \u00a56 [UNK] \u20b98 [UNK] test [SEP]", }, CURRENCY_WITH_DECIMALS: { text: BASE_TEST_STRINGS.CURRENCY_WITH_DECIMALS, tokens: ["I", "bought", "an", "app", "##le", "for", "$", "1", ".", "00", "at", "the", "store", "."], ids: [101, 146, 28870, 10151, 72894, 10284, 10142, 109, 122, 119, 11025, 10160, 10105, 13708, 119, 102], decoded: "[CLS] I bought an apple for $ 1. 00 at the store. [SEP]", }, ELLIPSIS: { text: BASE_TEST_STRINGS.ELLIPSIS, tokens: ["you", "[UNK]"], ids: [101, 13028, 100, 102], decoded: "[CLS] you [UNK] [SEP]", }, TEXT_WITH_ESCAPE_CHARACTERS: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS, tokens: ["you", "[UNK]"], ids: [101, 13028, 100, 102], decoded: "[CLS] you [UNK] [SEP]", }, TEXT_WITH_ESCAPE_CHARACTERS_2: { text: BASE_TEST_STRINGS.TEXT_WITH_ESCAPE_CHARACTERS_2, tokens: ["you", "[UNK]", "you", "[UNK]"], ids: [101, 13028, 100, 13028, 100, 102], decoded: "[CLS] you [UNK] you [UNK] [SEP]", }, TILDE_NORMALIZATION: { text: BASE_TEST_STRINGS.TILDE_NORMALIZATION, tokens: ["wei", "##rd", "\uff5e", "edge", "\uff5e", "case"], ids: [101, 86981, 12023, 10096, 30599, 10096, 13474, 102], decoded: "[CLS] weird \uff5e edge \uff5e case [SEP]", }, }, // `model.type` field missing in tokenizer.json "distilbert/distilbert-base-multilingual-cased": { CHINESE_LATIN_MIXED: { text: BERT_TEST_STRINGS.CHINESE_LATIN_MIXED, tokens: ["ah", "\u535a", "\u63a8", "z", "##z"], ids: [101, 69863, 2684, 4163, 194, 10305, 102], decoded: "[CLS] ah \u535a \u63a8 zz [SEP]", }, }, };
transformers.js/tests/models/distilbert/test_tokenization_distilbert.js/0
{ "file_path": "transformers.js/tests/models/distilbert/test_tokenization_distilbert.js", "repo_id": "transformers.js", "token_count": 7676 }
350
import { LlavaForConditionalGeneration, RawImage, LlavaProcessor } from "../../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { const prompts = [ // Example adapted from https://huggingface.co/docs/transformers/model_doc/llava#transformers.LlavaForConditionalGeneration.forward.example "USER: <image>\nWhat's the content of the image? ASSISTANT:", "<image>Hi", ]; // Empty white image const dims = [224, 224, 3]; const image = new RawImage(new Uint8ClampedArray(dims[0] * dims[1] * dims[2]).fill(255), ...dims); describe("LlavaForConditionalGeneration", () => { const model_id = "Xenova/tiny-random-LlavaForConditionalGeneration"; /** @type {LlavaForConditionalGeneration} */ let model; /** @type {LlavaProcessor} */ let processor; beforeAll(async () => { model = await LlavaForConditionalGeneration.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); processor = await LlavaProcessor.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "forward", async () => { const inputs = await processor(image, prompts[0]); const { logits } = await model(inputs); expect(logits.dims).toEqual([1, 246, 32002]); expect(logits.mean().item()).toBeCloseTo(-0.0005688573000952601, 8); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size=1", async () => { const inputs = await processor(image, prompts[0]); const generate_ids = await model.generate({ ...inputs, max_new_tokens: 10 }); expect(generate_ids.dims).toEqual([1, 256]); const new_ids = generate_ids.slice(null, [inputs.input_ids.dims[1], null]); expect(new_ids.tolist()).toEqual([[21557n, 16781n, 27238n, 8279n, 20454n, 11927n, 12462n, 12306n, 2414n, 7561n]]); }, MAX_TEST_EXECUTION_TIME, ); it( "batch_size>1", async () => { const inputs = await processor([image, image], prompts, { padding: true, }); const generate_ids = await model.generate({ ...inputs, max_new_tokens: 10 }); const new_ids = generate_ids.slice(null, [inputs.input_ids.dims[1], null]); expect(new_ids.tolist()).toEqual([ [21557n, 16781n, 27238n, 8279n, 20454n, 11927n, 12462n, 12306n, 2414n, 7561n], [1217n, 22958n, 22913n, 10381n, 148n, 31410n, 31736n, 7358n, 9150n, 28635n], ]); }, MAX_TEST_EXECUTION_TIME, ); it( "generate w/ past_key_values", async () => { // Empty white image const dims = [224, 224, 3]; const image = new RawImage(new Uint8ClampedArray(dims[0] * dims[1] * dims[2]).fill(255), ...dims); const inputs = await processor(image, prompts[0]); // Generate first sequence w/o PKV // NOTE: `return_dict_in_generate=true` is required to get PKV const { past_key_values, sequences } = await model.generate({ ...inputs, max_new_tokens: 5, do_sample: false, return_dict_in_generate: true, }); // Run w/o PKV const generated_ids = await model.generate({ ...inputs, max_new_tokens: 8, do_sample: false, }); // Run w/ PKV const generated_ids_pkv = await model.generate({ input_ids: sequences, past_key_values, max_new_tokens: 3, do_sample: false, }); const result = generated_ids.slice(null, [inputs.input_ids.dims[1], null]).tolist(); const result_pkv = generated_ids_pkv.slice(null, [inputs.input_ids.dims[1], null]).tolist(); // Ensure output is the same and correct const target = [[21557n, 16781n, 27238n, 8279n, 20454n, 11927n, 12462n, 12306n]]; expect(result).toEqual(target); expect(result_pkv).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/llava/test_modeling_llava.js/0
{ "file_path": "transformers.js/tests/models/llava/test_modeling_llava.js", "repo_id": "transformers.js", "token_count": 1863 }
351
import { SamProcessor, SamModel } from "../../../src/transformers.js"; import { load_cached_image } from "../../asset_cache.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js"; export default () => { describe("SamModel", () => { const model_id = "Xenova/slimsam-77-uniform"; /** @type {SamModel} */ let model; /** @type {SamProcessor} */ let processor; beforeAll(async () => { model = await SamModel.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); processor = await SamProcessor.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "w/ input_points", async () => { // Prepare image and input points const raw_image = await load_cached_image("corgi"); const input_points = [[[340, 250]]]; // Process inputs and perform mask generation const inputs = await processor(raw_image, { input_points }); const { pred_masks, iou_scores } = await model(inputs); expect(pred_masks.dims).toEqual([1, 1, 3, 256, 256]); expect(pred_masks.mean().item()).toBeCloseTo(-5.769824981689453, 3); expect(iou_scores.dims).toEqual([1, 1, 3]); expect(iou_scores.tolist()).toBeCloseToNested([[[0.8583833575248718, 0.9773167967796326, 0.8511142730712891]]]); // Post-process masks const masks = await processor.post_process_masks(pred_masks, inputs.original_sizes, inputs.reshaped_input_sizes); expect(masks).toHaveLength(1); expect(masks[0].dims).toEqual([1, 3, 410, 614]); expect(masks[0].type).toEqual("bool"); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/models/sam/test_modeling_sam.js/0
{ "file_path": "transformers.js/tests/models/sam/test_modeling_sam.js", "repo_id": "transformers.js", "token_count": 771 }
352
import { pipeline, QuestionAnsweringPipeline } from "../../src/transformers.js"; import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; const PIPELINE_ID = "question-answering"; export default () => { describe("Question Answering", () => { const model_id = "hf-internal-testing/tiny-random-BertForQuestionAnswering"; /** @type {QuestionAnsweringPipeline} */ let pipe; beforeAll(async () => { pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS); }, MAX_MODEL_LOAD_TIME); it("should be an instance of QuestionAnsweringPipeline", () => { expect(pipe).toBeInstanceOf(QuestionAnsweringPipeline); }); describe("batch_size=1", () => { it( "default (top_k=1)", async () => { const output = await pipe("a", "b c"); const target = { score: 0.11395696550607681, /* start: 0, end: 1, */ answer: "b" }; expect(output).toBeCloseToNested(target, 5); }, MAX_TEST_EXECUTION_TIME, ); it( "custom (top_k=3)", async () => { const output = await pipe("a", "b c", { top_k: 3 }); const target = [ { score: 0.11395696550607681, /* start: 0, end: 1, */ answer: "b" }, { score: 0.11300431191921234, /* start: 2, end: 3, */ answer: "c" }, { score: 0.10732574015855789, /* start: 0, end: 3, */ answer: "b c" }, ]; expect(output).toBeCloseToNested(target, 5); }, MAX_TEST_EXECUTION_TIME, ); }); afterAll(async () => { await pipe.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); };
transformers.js/tests/pipelines/test_pipelines_question_answering.js/0
{ "file_path": "transformers.js/tests/pipelines/test_pipelines_question_answering.js", "repo_id": "transformers.js", "token_count": 775 }
353
import { // Models AutoModelForSeq2SeqLM, AutoModelForCausalLM, LlamaForCausalLM, LlavaForConditionalGeneration, // Tokenizers AutoTokenizer, LlamaTokenizer, // Processors AutoProcessor, Processor, // Other TextStreamer, RawImage, } from "../../src/transformers.js"; import { init, MAX_TEST_EXECUTION_TIME, MAX_MODEL_LOAD_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js"; // Initialise the testing environment init(); // Helper function to generate text const generate = async (model, tokenizer, text, options) => { const inputs = tokenizer(text); return await model.generate({ ...inputs, ...options, }); }; describe("Generation parameters", () => { // List all models which will be tested const models = [ "hf-internal-testing/tiny-random-T5ForConditionalGeneration", // encoder-decoder "hf-internal-testing/tiny-random-LlamaForCausalLM", // decoder-only ]; const DUMMY_TEXT = "hello"; describe(`encoder-decoder (${models[0]})`, () => { const model_id = models[0]; let model; let tokenizer; beforeAll(async () => { model = await AutoModelForSeq2SeqLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await AutoTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); // NOTE: Since `max_length` defaults to 20, this case also tests that. it( "default", async () => { const outputs = await generate(model, tokenizer, DUMMY_TEXT, {}); expect(outputs.dims.at(-1)).toEqual(20); }, MAX_TEST_EXECUTION_TIME, ); it( "max_new_tokens", async () => { const MAX_NEW_TOKENS = 5; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { max_new_tokens: MAX_NEW_TOKENS, }); expect(outputs.dims.at(-1)).toEqual(MAX_NEW_TOKENS + 1); // + 1 due to forced BOS token }, MAX_TEST_EXECUTION_TIME, ); it( "min_length", async () => { const MIN_LENGTH = 3; const MAX_LENGTH = 5; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { eos_token_id: 0, min_length: MIN_LENGTH, max_length: MAX_LENGTH, }); expect(outputs.tolist()).toEqual([[0n, 11924n, 11924n, 11924n, 11924n]]); expect(outputs.dims.at(-1)).toBeGreaterThanOrEqual(MIN_LENGTH); }, MAX_TEST_EXECUTION_TIME, ); it( "min_new_tokens", async () => { const MIN_NEW_TOKENS = 2; const MAX_LENGTH = 5; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { eos_token_id: 0, min_new_tokens: MIN_NEW_TOKENS, max_length: MAX_LENGTH, }); expect(outputs.tolist()).toEqual([[0n, 11924n, 11924n, 11924n, 11924n]]); expect(outputs.dims.at(-1)).toBeGreaterThanOrEqual(MIN_NEW_TOKENS); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe(`decoder-only (${models[1]})`, () => { const model_id = models[1]; let model; let tokenizer; beforeAll(async () => { model = await AutoModelForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await AutoTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); // NOTE: Since `max_length` defaults to 20, this case also tests that. it( "default", async () => { const outputs = await generate(model, tokenizer, DUMMY_TEXT, {}); expect(outputs.dims.at(-1)).toEqual(20); }, MAX_TEST_EXECUTION_TIME, ); it( "max_new_tokens", async () => { const MAX_NEW_TOKENS = 5; const PROMPT_LENGTH = 2; // BOS + DUMMY_TEXT const outputs = await generate(model, tokenizer, DUMMY_TEXT, { max_new_tokens: MAX_NEW_TOKENS, }); const expected_length = PROMPT_LENGTH + MAX_NEW_TOKENS; expect(outputs.dims.at(-1)).toEqual(expected_length); }, MAX_TEST_EXECUTION_TIME, ); it( "min_length", async () => { const MIN_LENGTH = 4; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { eos_token_id: [ 18547, // min_length will suppress this token (generated by default) 16012, // stop at this token ], min_length: MIN_LENGTH, }); expect(outputs.tolist()).toEqual([[1n, 22172n, 31583n, 18824n, 16621n, 8136n, 16012n]]); expect(outputs.dims.at(-1)).toBeGreaterThanOrEqual(MIN_LENGTH); }, MAX_TEST_EXECUTION_TIME, ); it( "min_new_tokens", async () => { const MIN_NEW_TOKENS = 2; const outputs = await generate(model, tokenizer, DUMMY_TEXT, { eos_token_id: [ 18547, // min_new_tokens will suppress this token (generated by default) 16012, // stop at this token ], min_new_tokens: MIN_NEW_TOKENS, }); expect(outputs.tolist()).toEqual([[1n, 22172n, 31583n, 18824n, 16621n, 8136n, 16012n]]); expect(outputs.dims.at(-1)).toBeGreaterThanOrEqual(MIN_NEW_TOKENS); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); }); describe("Streamers", () => { describe("decoder-only", () => { const model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM"; let model, tokenizer; beforeAll(async () => { model = await AutoModelForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await AutoTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const target_chunks = ["hello", "erdingsdelete ", "melytabular ", "Stadiumoba ", "alcune ", "drug"]; const chunks = []; const callback_function = (text) => { chunks.push(text); }; const streamer = new TextStreamer(tokenizer, { callback_function, skip_special_tokens: true }); const inputs = tokenizer("hello"); const outputs = await model.generate({ ...inputs, max_length: 10, streamer, }); expect(outputs.tolist()).toEqual([[1n, 22172n, 18547n, 8143n, 22202n, 9456n, 17213n, 15330n, 26591n, 15721n]]); expect(chunks).toEqual(target_chunks); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); }); describe("PKV caching", () => { describe("LlamaForCausalLM", () => { const model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM"; /** @type {LlamaForCausalLM} */ let model; /** @type {LlamaTokenizer} */ let tokenizer; beforeAll(async () => { model = await LlamaForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await LlamaTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const inputs = tokenizer("1"); // Generate first sequence w/o PKV // NOTE: `return_dict_in_generate=true` is required to get PKV const { past_key_values, sequences } = await model.generate({ ...inputs, max_new_tokens: 5, do_sample: false, return_dict_in_generate: true, }); // Update output with new text const decoded = tokenizer.batch_decode(sequences, { skip_special_tokens: false, })[0]; const new_inputs = tokenizer(decoded + "2", { add_special_tokens: false, }); // Run w/o PKV const generated_ids = await model.generate({ ...new_inputs, max_new_tokens: 3, do_sample: false, }); // Run w/ PKV const generated_ids_pkv = await model.generate({ ...new_inputs, past_key_values, max_new_tokens: 3, do_sample: false, }); const target = [[1n, 259n, 29896n, 24959n, 22063n, 17192n, 12189n, 22468n, 29906n, 3399n, 24823n, 26470n]]; expect(generated_ids.tolist()).toEqual(target); expect(generated_ids_pkv.tolist()).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); describe("LlamaForCausalLM (onnxruntime-genai)", () => { const model_id = "onnx-internal-testing/tiny-random-LlamaForCausalLM-GQA"; /** @type {LlamaForCausalLM} */ let model; /** @type {LlamaTokenizer} */ let tokenizer; beforeAll(async () => { model = await LlamaForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS); tokenizer = await LlamaTokenizer.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "batch_size=1", async () => { const inputs = tokenizer("1"); // Generate first sequence w/o PKV // NOTE: `return_dict_in_generate=true` is required to get PKV const { past_key_values, sequences } = await model.generate({ ...inputs, max_new_tokens: 5, do_sample: false, return_dict_in_generate: true, }); // Update output with new text const decoded = tokenizer.batch_decode(sequences, { skip_special_tokens: false, })[0]; const new_inputs = tokenizer(decoded + "2", { add_special_tokens: false, }); // Run w/o PKV const generated_ids = await model.generate({ ...new_inputs, max_new_tokens: 3, do_sample: false, }); // Run w/ PKV const generated_ids_pkv = await model.generate({ ...new_inputs, past_key_values, max_new_tokens: 3, do_sample: false, }); const target = [[128000n, 16n, 34732n, 98805n, 116404n, 68265n, 99392n, 17n, 21855n, 60933n, 14285n]]; expect(generated_ids.tolist()).toEqual(target); expect(generated_ids_pkv.tolist()).toEqual(target); }, MAX_TEST_EXECUTION_TIME, ); afterAll(async () => { await model?.dispose(); }, MAX_MODEL_DISPOSE_TIME); }); });
transformers.js/tests/utils/generation.test.js/0
{ "file_path": "transformers.js/tests/utils/generation.test.js", "repo_id": "transformers.js", "token_count": 4846 }
354
# Awesome projects built with Transformers This page lists awesome projects built on top of Transformers. Transformers is more than a toolkit to use pretrained models: it's a community of projects built around it and the Hugging Face Hub. We want Transformers to enable developers, researchers, students, professors, engineers, and anyone else to build their dream projects. In this list, we showcase incredibly impactful and novel projects that have pushed the field forward. We celebrate 100 of these projects as we reach the milestone of 100k stars as a community; but we're very open to pull requests adding other projects to the list. If you believe a project should be here and it's not, then please, open a PR to add it. ## [gpt4all](https://github.com/nomic-ai/gpt4all) [gpt4all](https://github.com/nomic-ai/gpt4all) is an ecosystem of open-source chatbots trained on massive collections of clean assistant data including code, stories and dialogue. It offers open-source, large language models such as LLaMA and GPT-J trained in an assistant-style. Keywords: Open-source, LLaMa, GPT-J, instruction, assistant ## [recommenders](https://github.com/recommenders-team/recommenders) This repository contains examples and best practices for building recommendation systems, provided as Jupyter notebooks. It goes over several aspects required to build efficient recommendation systems: data preparation, modeling, evaluation, model selection & optimization, as well as operationalization Keywords: Recommender systems, AzureML ## [IOPaint](https://github.com/Sanster/IOPaint) Image inpainting tool powered by Stable Diffusion. Remove any unwanted object, defect, people from your pictures or erase and replace anything on your pictures. Keywords: inpainting, SD, Stable Diffusion ## [flair](https://github.com/flairNLP/flair) FLAIR is a powerful PyTorch NLP framework, covering several important tasks: NER, sentiment-analysis, part-of-speech tagging, text and document embeddings, among other things. Keywords: NLP, text embedding, document embedding, biomedical, NER, PoS, sentiment-analysis ## [mindsdb](https://github.com/mindsdb/mindsdb) MindsDB is a low-code ML platform, which automates and integrates several ML frameworks into the data stack as "AI Tables" to streamline the integration of AI into applications, making it accessible to developers of all skill levels. Keywords: Database, low-code, AI table ## [langchain](https://github.com/langchain-ai/langchain) [langchain](https://github.com/langchain-ai/langchain) is aimed at assisting in the development of apps merging both LLMs and other sources of knowledge. The library allows chaining calls to applications, creating a sequence across many tools. Keywords: LLMs, Large Language Models, Agents, Chains ## [LlamaIndex](https://github.com/run-llama/llama_index) [LlamaIndex](https://github.com/run-llama/llama_index) is a project that provides a central interface to connect your LLM's with external data. It provides various kinds of indices and retrieval mechanisms to perform different LLM tasks and obtain knowledge-augmented results. Keywords: LLMs, Large Language Models, Data Retrieval, Indices, Knowledge Augmentation ## [ParlAI](https://github.com/facebookresearch/ParlAI) [ParlAI](https://github.com/facebookresearch/ParlAI) is a python framework for sharing, training and testing dialogue models, from open-domain chitchat, to task-oriented dialogue, to visual question answering. It provides more than 100 datasets under the same API, a large zoo of pretrained models, a set of agents, and has several integrations. Keywords: Dialogue, Chatbots, VQA, Datasets, Agents ## [sentence-transformers](https://github.com/UKPLab/sentence-transformers) This framework provides an easy method to compute dense vector representations for sentences, paragraphs, and images. The models are based on transformer networks like BERT / RoBERTa / XLM-RoBERTa etc. and achieve state-of-the-art performance in various task. Text is embedding in vector space such that similar text is close and can efficiently be found using cosine similarity. Keywords: Dense vector representations, Text embeddings, Sentence embeddings ## [ludwig](https://github.com/ludwig-ai/ludwig) Ludwig is a declarative machine learning framework that makes it easy to define machine learning pipelines using a simple and flexible data-driven configuration system. Ludwig is targeted at a wide variety of AI tasks. It provides a data-driven configuration system, training, prediction, and evaluation scripts, as well as a programmatic API. Keywords: Declarative, Data-driven, ML Framework ## [InvokeAI](https://github.com/invoke-ai/InvokeAI) [InvokeAI](https://github.com/invoke-ai/InvokeAI) is an engine for Stable Diffusion models, aimed at professionals, artists, and enthusiasts. It leverages the latest AI-driven technologies through CLI as well as a WebUI. Keywords: Stable-Diffusion, WebUI, CLI ## [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) is an easy-to-use and powerful NLP library particularly targeted at the Chinese languages. It has support for multiple pre-trained model zoos, and supports a wide-range of NLP tasks from research to industrial applications. Keywords: NLP, Chinese, Research, Industry ## [stanza](https://github.com/stanfordnlp/stanza) The Stanford NLP Group's official Python NLP library. It contains support for running various accurate natural language processing tools on 60+ languages and for accessing the Java Stanford CoreNLP software from Python. Keywords: NLP, Multilingual, CoreNLP ## [DeepPavlov](https://github.com/deeppavlov/DeepPavlov) [DeepPavlov](https://github.com/deeppavlov/DeepPavlov) is an open-source conversational AI library. It is designed for the development of production ready chat-bots and complex conversational systems, as well as research in the area of NLP and, particularly, of dialog systems. Keywords: Conversational, Chatbot, Dialog ## [alpaca-lora](https://github.com/tloen/alpaca-lora) Alpaca-lora contains code for reproducing the Stanford Alpaca results using low-rank adaptation (LoRA). The repository provides training (fine-tuning) as well as generation scripts. Keywords: LoRA, Parameter-efficient fine-tuning ## [imagen-pytorch](https://github.com/lucidrains/imagen-pytorch) An open-source Implementation of Imagen, Google's closed-source Text-to-Image Neural Network that beats DALL-E2. As of release, it is the new SOTA for text-to-image synthesis. Keywords: Imagen, Text-to-image ## [adapters](https://github.com/adapter-hub/adapters) [adapters](https://github.com/adapter-hub/adapters) is an extension of HuggingFace's Transformers library, integrating adapters into state-of-the-art language models by incorporating AdapterHub, a central repository for pre-trained adapter modules. It is a drop-in replacement for transformers, which is regularly updated to stay up-to-date with the developments of transformers. Keywords: Adapters, LoRA, Parameter-efficient fine-tuning, Hub ## [NeMo](https://github.com/NVIDIA/NeMo) NVIDIA [NeMo](https://github.com/NVIDIA/NeMo) is a conversational AI toolkit built for researchers working on automatic speech recognition (ASR), text-to-speech synthesis (TTS), large language models (LLMs), and natural language processing (NLP). The primary objective of [NeMo](https://github.com/NVIDIA/NeMo) is to help researchers from industry and academia to reuse prior work (code and pretrained models) and make it easier to create new https://developer.nvidia.com/conversational-ai#started. Keywords: Conversational, ASR, TTS, LLMs, NLP ## [Runhouse](https://github.com/run-house/runhouse) [Runhouse](https://github.com/run-house/runhouse) allows to send code and data to any of your compute or data infra, all in Python, and continue to interact with them normally from your existing code and environment. Runhouse developers mention: > Think of it as an expansion pack to your Python interpreter that lets it take detours to remote machines or manipulate remote data. Keywords: MLOps, Infrastructure, Data storage, Modeling ## [MONAI](https://github.com/Project-MONAI/MONAI) [MONAI](https://github.com/Project-MONAI/MONAI) is a PyTorch-based, open-source framework for deep learning in healthcare imaging, part of PyTorch Ecosystem. Its ambitions are: - developing a community of academic, industrial and clinical researchers collaborating on a common foundation; - creating state-of-the-art, end-to-end training workflows for healthcare imaging; - providing researchers with the optimized and standardized way to create and evaluate deep learning models. Keywords: Healthcare imaging, Training, Evaluation ## [simpletransformers](https://github.com/ThilinaRajapakse/simpletransformers) Simple Transformers lets you quickly train and evaluate Transformer models. Only 3 lines of code are needed to initialize, train, and evaluate a model. It supports a wide variety of NLP tasks. Keywords: Framework, simplicity, NLP ## [JARVIS](https://github.com/microsoft/JARVIS) [JARVIS](https://github.com/microsoft/JARVIS) is a system attempting to merge LLMs such as GPT-4 with the rest of the open-source ML community: leveraging up to 60 downstream models in order to perform tasks identified by the LLM. Keywords: LLM, Agents, HF Hub ## [transformers.js](https://github.com/huggingface/transformers.js/) [transformers.js](https://github.com/huggingface/transformers.js/) is a JavaScript library targeted at running models from transformers directly within the browser. Keywords: Transformers, JavaScript, browser ## [bumblebee](https://github.com/elixir-nx/bumblebee) Bumblebee provides pre-trained Neural Network models on top of Axon, a neural networks library for the Elixir language. It includes integration with 🤗 Models, allowing anyone to download and perform Machine Learning tasks with few lines of code. Keywords: Elixir, Axon ## [argilla](https://github.com/argilla-io/argilla) Argilla is an open-source platform providing advanced NLP labeling, monitoring, and workspaces. It is compatible with many open source ecosystems such as Hugging Face, Stanza, FLAIR, and others. Keywords: NLP, Labeling, Monitoring, Workspaces ## [haystack](https://github.com/deepset-ai/haystack) Haystack is an open source NLP framework to interact with your data using Transformer models and LLMs. It offers production-ready tools to quickly build complex decision making, question answering, semantic search, text generation applications, and more. Keywords: NLP, Framework, LLM ## [spaCy](https://github.com/explosion/spaCy) [spaCy](https://github.com/explosion/spaCy) is a library for advanced Natural Language Processing in Python and Cython. It's built on the very latest research, and was designed from day one to be used in real products. It offers support for transformers models through its third party package, spacy-transformers. Keywords: NLP, Framework ## [speechbrain](https://github.com/speechbrain/speechbrain) SpeechBrain is an open-source and all-in-one conversational AI toolkit based on PyTorch. The goal is to create a single, flexible, and user-friendly toolkit that can be used to easily develop state-of-the-art speech technologies, including systems for speech recognition, speaker recognition, speech enhancement, speech separation, language identification, multi-microphone signal processing, and many others. Keywords: Conversational, Speech ## [skorch](https://github.com/skorch-dev/skorch) Skorch is a scikit-learn compatible neural network library that wraps PyTorch. It has support for models within transformers, and tokenizers from tokenizers. Keywords: Scikit-Learn, PyTorch ## [bertviz](https://github.com/jessevig/bertviz) BertViz is an interactive tool for visualizing attention in Transformer language models such as BERT, GPT2, or T5. It can be run inside a Jupyter or Colab notebook through a simple Python API that supports most Huggingface models. Keywords: Visualization, Transformers ## [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax) [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax) is a haiku library using the xmap/pjit operators in JAX for model parallelism of transformers. This library is designed for scalability up to approximately 40B parameters on TPUv3s. It was the library used to train the GPT-J model. Keywords: Haiku, Model parallelism, LLM, TPU ## [deepchem](https://github.com/deepchem/deepchem) DeepChem aims to provide a high quality open-source toolchain that democratizes the use of deep-learning in drug discovery, materials science, quantum chemistry, and biology. Keywords: Drug discovery, Materials Science, Quantum Chemistry, Biology ## [OpenNRE](https://github.com/thunlp/OpenNRE) An Open-Source Package for Neural Relation Extraction (NRE). It is targeted at a wide range of users, from newcomers to relation extraction, to developers, researchers, or students. Keywords: Neural Relation Extraction, Framework ## [pycorrector](https://github.com/shibing624/pycorrector) PyCorrector is a Chinese Text Error Correction Tool. It uses a language model to detect errors, pinyin feature and shape feature to correct Chinese text errors. it can be used for Chinese Pinyin and stroke input method. Keywords: Chinese, Error correction tool, Language model, Pinyin ## [nlpaug](https://github.com/makcedward/nlpaug) This python library helps you with augmenting nlp for machine learning projects. It is a lightweight library featuring synthetic data generation for improving model performance, support for audio and text, and compatibility with several ecosystems (scikit-learn, pytorch, tensorflow). Keywords: Data augmentation, Synthetic data generation, Audio, NLP ## [dream-textures](https://github.com/carson-katri/dream-textures) [dream-textures](https://github.com/carson-katri/dream-textures) is a library targeted at bringing stable-diffusion support within Blender. It supports several use-cases, such as image generation, texture projection, inpainting/outpainting, ControlNet, and upscaling. Keywords: Stable-Diffusion, Blender ## [seldon-core](https://github.com/SeldonIO/seldon-core) Seldon core converts your ML models (Tensorflow, Pytorch, H2o, etc.) or language wrappers (Python, Java, etc.) into production REST/GRPC microservices. Seldon handles scaling to thousands of production machine learning models and provides advanced machine learning capabilities out of the box including Advanced Metrics, Request Logging, Explainers, Outlier Detectors, A/B Tests, Canaries and more. Keywords: Microservices, Modeling, Language wrappers ## [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo) This repository includes optimized deep learning models and a set of demos to expedite development of high-performance deep learning inference applications. Use these free pre-trained models instead of training your own models to speed-up the development and production deployment process. Keywords: Optimized models, Demos ## [ml-stable-diffusion](https://github.com/apple/ml-stable-diffusion) ML-Stable-Diffusion is a repository by Apple bringing Stable Diffusion support to Core ML, on Apple Silicon devices. It supports stable diffusion checkpoints hosted on the Hugging Face Hub. Keywords: Stable Diffusion, Apple Silicon, Core ML ## [stable-dreamfusion](https://github.com/ashawkey/stable-dreamfusion) Stable-Dreamfusion is a pytorch implementation of the text-to-3D model Dreamfusion, powered by the Stable Diffusion text-to-2D model. Keywords: Text-to-3D, Stable Diffusion ## [txtai](https://github.com/neuml/txtai) [txtai](https://github.com/neuml/txtai) is an open-source platform for semantic search and workflows powered by language models. txtai builds embeddings databases, which are a union of vector indexes and relational databases enabling similarity search with SQL. Semantic workflows connect language models together into unified applications. Keywords: Semantic search, LLM ## [djl](https://github.com/deepjavalibrary/djl) Deep Java Library (DJL) is an open-source, high-level, engine-agnostic Java framework for deep learning. DJL is designed to be easy to get started with and simple to use for developers. DJL provides a native Java development experience and functions like any other regular Java library. DJL offers [a Java binding](https://github.com/deepjavalibrary/djl/tree/master/extensions/tokenizers) for HuggingFace Tokenizers and easy conversion toolkit for HuggingFace model to deploy in Java. Keywords: Java, Framework ## [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/) This project provides a unified framework to test generative language models on a large number of different evaluation tasks. It has support for more than 200 tasks, and supports different ecosystems: HF Transformers, GPT-NeoX, DeepSpeed, as well as the OpenAI API. Keywords: LLM, Evaluation, Few-shot ## [gpt-neox](https://github.com/EleutherAI/gpt-neox) This repository records EleutherAI's library for training large-scale language models on GPUs. The framework is based on NVIDIA's Megatron Language Model and has been augmented with techniques from DeepSpeed as well as some novel optimizations. It is focused on training multi-billion-parameter models. Keywords: Training, LLM, Megatron, DeepSpeed ## [muzic](https://github.com/microsoft/muzic) Muzic is a research project on AI music that empowers music understanding and generation with deep learning and artificial intelligence. Muzic was created by researchers from Microsoft Research Asia. Keywords: Music understanding, Music generation ## [dalle-flow](https://github.com/jina-ai/dalle-flow) DALL·E Flow is an interactive workflow for generating high-definition images from a text prompt. It leverages DALL·E-Mega, GLID-3 XL, and Stable Diffusion to generate image candidates, and then calls CLIP-as-service to rank the candidates w.r.t. the prompt. The preferred candidate is fed to GLID-3 XL for diffusion, which often enriches the texture and background. Finally, the candidate is upscaled to 1024x1024 via SwinIR. Keywords: High-definition image generation, Stable Diffusion, DALL-E Mega, GLID-3 XL, CLIP, SwinIR ## [lightseq](https://github.com/bytedance/lightseq) LightSeq is a high performance training and inference library for sequence processing and generation implemented in CUDA. It enables highly efficient computation of modern NLP and CV models such as BERT, GPT, Transformer, etc. It is therefore best useful for machine translation, text generation, image classification, and other sequence related tasks. Keywords: Training, Inference, Sequence Processing, Sequence Generation ## [LaTeX-OCR](https://github.com/lukas-blecher/LaTeX-OCR) The goal of this project is to create a learning based system that takes an image of a math formula and returns corresponding LaTeX code. Keywords: OCR, LaTeX, Math formula ## [open_clip](https://github.com/mlfoundations/open_clip) OpenCLIP is an open source implementation of OpenAI's CLIP. The goal of this repository is to enable training models with contrastive image-text supervision, and to investigate their properties such as robustness to distribution shift. The starting point is an implementation of CLIP that matches the accuracy of the original CLIP models when trained on the same dataset. Specifically, a ResNet-50 model trained with this codebase on OpenAI's 15 million image subset of YFCC achieves 32.7% top-1 accuracy on ImageNet. Keywords: CLIP, Open-source, Contrastive, Image-text ## [dalle-playground](https://github.com/saharmor/dalle-playground) A playground to generate images from any text prompt using Stable Diffusion and Dall-E mini. Keywords: WebUI, Stable Diffusion, Dall-E mini ## [FedML](https://github.com/FedML-AI/FedML) [FedML](https://github.com/FedML-AI/FedML) is a federated learning and analytics library enabling secure and collaborative machine learning on decentralized data anywhere at any scale. It supports large-scale cross-silo federated learning, and cross-device federated learning on smartphones/IoTs, and research simulation. Keywords: Federated Learning, Analytics, Collaborative ML, Decentralized ## [gpt-code-clippy](https://github.com/CodedotAl/gpt-code-clippy) GPT-Code-Clippy (GPT-CC) is an open source version of GitHub Copilot, a language model -- based on GPT-3, called GPT-Codex -- that is fine-tuned on publicly available code from GitHub. Keywords: LLM, Code ## [TextAttack](https://github.com/QData/TextAttack) [TextAttack](https://github.com/QData/TextAttack) 🐙 is a Python framework for adversarial attacks, data augmentation, and model training in NLP. Keywords: Adversarial attacks, Data augmentation, NLP ## [OpenPrompt](https://github.com/thunlp/OpenPrompt) Prompt-learning is a paradigm to adapt pre-trained language models (PLMs) to downstream NLP tasks, which modify the input text with a textual template and directly uses PLMs to conduct pre-trained tasks. This library provides a standard, flexible and extensible framework to deploy the prompt-learning pipeline. [OpenPrompt](https://github.com/thunlp/OpenPrompt) supports loading PLMs directly from https://github.com/huggingface/transformers. ## [text-generation-webui](https://github.com/oobabooga/text-generation-webui/) [text-generation-webui](https://github.com/oobabooga/text-generation-webui/) is a Gradio Web UI for running Large Language Models like LLaMA, llama.cpp, GPT-J, Pythia, OPT, and GALACTICA. Keywords: LLM, WebUI ## [libra](https://github.com/Palashio/libra) An ergonomic machine learning [libra](https://github.com/Palashio/libra)ry for non-technical users. It focuses on ergonomics and on ensuring that training a model is as simple as it can be. Keywords: Ergonomic, Non-technical ## [alibi](https://github.com/SeldonIO/alibi) Alibi is an open source Python library aimed at machine learning model inspection and interpretation. The focus of the library is to provide high-quality implementations of black-box, white-box, local and global explanation methods for classification and regression models. Keywords: Model inspection, Model interpretation, Black-box, White-box ## [tortoise-tts](https://github.com/neonbjb/tortoise-tts) Tortoise is a text-to-speech program built with the following priorities: strong multi-voice capabilities, and highly realistic prosody and intonation. Keywords: Text-to-speech ## [flower](https://github.com/adap/flower) Flower (flwr) is a framework for building federated learning systems. The design of Flower is based on a few guiding principles: customizability, extendability, framework agnosticity, and ease-of-use. Keywords: Federated learning systems, Customizable, Extendable, Framework-agnostic, Simplicity ## [fast-bert](https://github.com/utterworks/fast-bert) Fast-Bert is a deep learning library that allows developers and data scientists to train and deploy BERT and XLNet based models for natural language processing tasks beginning with Text Classification. It is aimed at simplicity. Keywords: Deployment, BERT, XLNet ## [towhee](https://github.com/towhee-io/towhee) Towhee makes it easy to build neural data processing pipelines for AI applications. We provide hundreds of models, algorithms, and transformations that can be used as standard pipeline building blocks. Users can use Towhee's Pythonic API to build a prototype of their pipeline and automatically optimize it for production-ready environments. Keywords: Data processing pipeline, Optimization ## [alibi-detect](https://github.com/SeldonIO/alibi-detect) Alibi Detect is an open source Python library focused on outlier, adversarial and drift detection. The package aims to cover both online and offline detectors for tabular data, text, images and time series. Both TensorFlow and PyTorch backends are supported for drift detection. Keywords: Adversarial, Outlier, Drift detection ## [FARM](https://github.com/deepset-ai/FARM) [FARM](https://github.com/deepset-ai/FARM) makes Transfer Learning with BERT & Co simple, fast and enterprise-ready. It's built upon transformers and provides additional features to simplify the life of developers: Parallelized preprocessing, highly modular design, multi-task learning, experiment tracking, easy debugging and close integration with AWS SageMaker. Keywords: Transfer Learning, Modular design, Multi-task learning, Experiment tracking ## [aitextgen](https://github.com/minimaxir/aitextgen) A robust Python tool for text-based AI training and generation using OpenAI's GPT-2 and EleutherAI's GPT Neo/GPT-3 architecture. [aitextgen](https://github.com/minimaxir/aitextgen) is a Python package that leverages PyTorch, Hugging Face Transformers and pytorch-lightning with specific optimizations for text generation using GPT-2, plus many added features. Keywords: Training, Generation ## [diffgram](https://github.com/diffgram/diffgram) Diffgram aims to integrate human supervision into platforms. We support your team programmatically changing the UI (Schema, layout, etc.) like in Streamlit. This means that you can collect and annotate timely data from users. In other words, we are the platform behind your platform, an integrated part of your application, to ship new & better AI products faster. Keywords: Human supervision, Platform ## [ecco](https://github.com/jalammar/ecco) Explain, analyze, and visualize NLP language models. Ecco creates interactive visualizations directly in Jupyter notebooks explaining the behavior of Transformer-based language models (like GPT2, BERT, RoBERTA, T5, and T0). Keywords: Model explainability ## [s3prl](https://github.com/s3prl/s3prl) [s3prl](https://github.com/s3prl/s3prl) stands for Self-Supervised Speech Pre-training and Representation Learning. Self-supervised speech pre-trained models are called upstream in this toolkit, and are utilized in various downstream tasks. Keywords: Speech, Training ## [ru-dalle](https://github.com/ai-forever/ru-dalle) RuDALL-E aims to be similar to DALL-E, targeted to Russian. Keywords: DALL-E, Russian ## [DeepKE](https://github.com/zjunlp/DeepKE) [DeepKE](https://github.com/zjunlp/DeepKE) is a knowledge extraction toolkit for knowledge graph construction supporting cnSchema,low-resource, document-level and multimodal scenarios for entity, relation and attribute extraction. Keywords: Knowledge Extraction, Knowledge Graphs ## [Nebuly](https://github.com/nebuly-ai/optimate) Nebuly is the next-generation platform to monitor and optimize your AI costs in one place. The platform connects to all your AI cost sources (compute, API providers, AI software licenses, etc) and centralizes them in one place to give you full visibility on a model basis. The platform also provides optimization recommendations and a co-pilot model that can guide during the optimization process. The platform builds on top of the open-source tools allowing you to optimize the different steps of your AI stack to squeeze out the best possible cost performances. Keywords: Optimization, Performance, Monitoring ## [imaginAIry](https://github.com/brycedrennan/imaginAIry) Offers a CLI and a Python API to generate images with Stable Diffusion. It has support for many tools, like image structure control (controlnet), instruction-based image edits (InstructPix2Pix), prompt-based masking (clipseg), among others. Keywords: Stable Diffusion, CLI, Python API ## [sparseml](https://github.com/neuralmagic/sparseml) SparseML is an open-source model optimization toolkit that enables you to create inference-optimized sparse models using pruning, quantization, and distillation algorithms. Models optimized with SparseML can then be exported to the ONNX and deployed with DeepSparse for GPU-class performance on CPU hardware. Keywords: Model optimization, Pruning, Quantization, Distillation ## [opacus](https://github.com/pytorch/opacus) Opacus is a library that enables training PyTorch models with differential privacy. It supports training with minimal code changes required on the client, has little impact on training performance, and allows the client to online track the privacy budget expended at any given moment. Keywords: Differential privacy ## [LAVIS](https://github.com/salesforce/LAVIS) [LAVIS](https://github.com/salesforce/LAVIS) is a Python deep learning library for LAnguage-and-VISion intelligence research and applications. This library aims to provide engineers and researchers with a one-stop solution to rapidly develop models for their specific multimodal scenarios, and benchmark them across standard and customized datasets. It features a unified interface design to access Keywords: Multimodal, NLP, Vision ## [buzz](https://github.com/chidiwilliams/buzz) Buzz transcribes and translates audio offline on your personal computer. Powered by OpenAI's Whisper. Keywords: Audio transcription, Translation ## [rust-bert](https://github.com/guillaume-be/rust-bert) Rust-native state-of-the-art Natural Language Processing models and pipelines. Port of Hugging Face's Transformers library, using the tch-rs crate and pre-processing from rust-tokenizers. Supports multi-threaded tokenization and GPU inference. This repository exposes the model base architecture, task-specific heads and ready-to-use pipelines. Keywords: Rust, BERT, Inference ## [EasyNLP](https://github.com/alibaba/EasyNLP) [EasyNLP](https://github.com/alibaba/EasyNLP) is an easy-to-use NLP development and application toolkit in PyTorch, first released inside Alibaba in 2021. It is built with scalable distributed training strategies and supports a comprehensive suite of NLP algorithms for various NLP applications. [EasyNLP](https://github.com/alibaba/EasyNLP) integrates knowledge distillation and few-shot learning for landing large pre-trained models, together with various popular multi-modality pre-trained models. It provides a unified framework of model training, inference, and deployment for real-world applications. Keywords: NLP, Knowledge distillation, Few-shot learning, Multi-modality, Training, Inference, Deployment ## [TurboTransformers](https://github.com/Tencent/TurboTransformers) A fast and user-friendly runtime for transformer inference (Bert, Albert, GPT2, Decoders, etc) on CPU and GPU. Keywords: Optimization, Performance ## [hivemind](https://github.com/learning-at-home/hivemind) Hivemind is a PyTorch library for decentralized deep learning across the Internet. Its intended usage is training one large model on hundreds of computers from different universities, companies, and volunteers. Keywords: Decentralized training ## [docquery](https://github.com/impira/docquery) DocQuery is a library and command-line tool that makes it easy to analyze semi-structured and unstructured documents (PDFs, scanned images, etc.) using large language models (LLMs). You simply point DocQuery at one or more documents and specify a question you want to ask. DocQuery is created by the team at Impira. Keywords: Semi-structured documents, Unstructured documents, LLM, Document Question Answering ## [CodeGeeX](https://github.com/THUDM/CodeGeeX) [CodeGeeX](https://github.com/THUDM/CodeGeeX) is a large-scale multilingual code generation model with 13 billion parameters, pre-trained on a large code corpus of more than 20 programming languages. It has several unique features: - Multilingual code generation - Crosslingual code translation - Is a customizable programming assistant Keywords: Code Generation Model ## [ktrain](https://github.com/amaiya/ktrain) [ktrain](https://github.com/amaiya/ktrain) is a lightweight wrapper for the deep learning library TensorFlow Keras (and other libraries) to help build, train, and deploy neural networks and other machine learning models. Inspired by ML framework extensions like fastai and ludwig, [ktrain](https://github.com/amaiya/ktrain) is designed to make deep learning and AI more accessible and easier to apply for both newcomers and experienced practitioners. Keywords: Keras wrapper, Model building, Training, Deployment ## [FastDeploy](https://github.com/PaddlePaddle/FastDeploy) [FastDeploy](https://github.com/PaddlePaddle/FastDeploy) is an Easy-to-use and High Performance AI model deployment toolkit for Cloud, Mobile and Edge with packageout-of-the-box and unified experience, endend-to-end optimization for over fire160+ Text, Vision, Speech and Cross-modal AI models. Including image classification, object detection, OCR, face detection, matting, pp-tracking, NLP, stable diffusion, TTS and other tasks to meet developers' industrial deployment needs for multi-scenario, multi-hardware and multi-platform. Keywords: Model deployment, CLoud, Mobile, Edge ## [underthesea](https://github.com/undertheseanlp/underthesea) [underthesea](https://github.com/undertheseanlp/underthesea) is a Vietnamese NLP toolkit. Underthesea is a suite of open source Python modules data sets and tutorials supporting research and development in Vietnamese Natural Language Processing. We provide extremely easy API to quickly apply pretrained NLP models to your Vietnamese text, such as word segmentation, part-of-speech tagging (PoS), named entity recognition (NER), text classification and dependency parsing. Keywords: Vietnamese, NLP ## [hasktorch](https://github.com/hasktorch/hasktorch) Hasktorch is a library for tensors and neural networks in Haskell. It is an independent open source community project which leverages the core C++ libraries shared by PyTorch. Keywords: Haskell, Neural Networks ## [donut](https://github.com/clovaai/donut) Donut, or Document understanding transformer, is a new method of document understanding that utilizes an OCR-free end-to-end Transformer model. Donut does not require off-the-shelf OCR engines/APIs, yet it shows state-of-the-art performances on various visual document understanding tasks, such as visual document classification or information extraction (a.k.a. document parsing). Keywords: Document Understanding ## [transformers-interpret](https://github.com/cdpierse/transformers-interpret) Transformers Interpret is a model explainability tool designed to work exclusively with the transformers package. In line with the philosophy of the Transformers package Transformers Interpret allows any transformers model to be explained in just two lines. Explainers are available for both text and computer vision models. Visualizations are also available in notebooks and as savable png and html files Keywords: Model interpretation, Visualization ## [mlrun](https://github.com/mlrun/mlrun) MLRun is an open MLOps platform for quickly building and managing continuous ML applications across their lifecycle. MLRun integrates into your development and CI/CD environment and automates the delivery of production data, ML pipelines, and online applications, significantly reducing engineering efforts, time to production, and computation resources. With MLRun, you can choose any IDE on your local machine or on the cloud. MLRun breaks the silos between data, ML, software, and DevOps/MLOps teams, enabling collaboration and fast continuous improvements. Keywords: MLOps ## [FederatedScope](https://github.com/alibaba/FederatedScope) [FederatedScope](https://github.com/alibaba/FederatedScope) is a comprehensive federated learning platform that provides convenient usage and flexible customization for various federated learning tasks in both academia and industry. Based on an event-driven architecture, [FederatedScope](https://github.com/alibaba/FederatedScope) integrates rich collections of functionalities to satisfy the burgeoning demands from federated learning, and aims to build up an easy-to-use platform for promoting learning safely and effectively. Keywords: Federated learning, Event-driven ## [pythainlp](https://github.com/PyThaiNLP/pythainlp) PyThaiNLP is a Python package for text processing and linguistic analysis, similar to NLTK with focus on Thai language. Keywords: Thai, NLP, NLTK ## [FlagAI](https://github.com/FlagAI-Open/FlagAI) [FlagAI](https://github.com/FlagAI-Open/FlagAI) (Fast LArge-scale General AI models) is a fast, easy-to-use and extensible toolkit for large-scale model. Our goal is to support training, fine-tuning, and deployment of large-scale models on various downstream tasks with multi-modality. Keywords: Large models, Training, Fine-tuning, Deployment, Multi-modal ## [pyserini](https://github.com/castorini/pyserini) [pyserini](https://github.com/castorini/pyserini) is a Python toolkit for reproducible information retrieval research with sparse and dense representations. Retrieval using sparse representations is provided via integration with the group's Anserini IR toolkit. Retrieval using dense representations is provided via integration with Facebook's Faiss library. Keywords: IR, Information Retrieval, Dense, Sparse ## [baal](https://github.com/baal-org/baal) [baal](https://github.com/baal-org/baal) is an active learning library that supports both industrial applications and research usecases. [baal](https://github.com/baal-org/baal) currently supports Monte-Carlo Dropout, MCDropConnect, deep ensembles, and semi-supervised learning. Keywords: Active Learning, Research, Labeling ## [cleanlab](https://github.com/cleanlab/cleanlab) [cleanlab](https://github.com/cleanlab/cleanlab) is the standard data-centric AI package for data quality and machine learning with messy, real-world data and labels. For text, image, tabular, audio (among others) datasets, you can use cleanlab to automatically: detect data issues (outliers, label errors, near duplicates, etc), train robust ML models, infer consensus + annotator-quality for multi-annotator data, suggest data to (re)label next (active learning). Keywords: Data-Centric AI, Data Quality, Noisy Labels, Outlier Detection, Active Learning ## [BentoML](https://github.com/bentoml/BentoML) [BentoML](https://github.com/bentoml) is the unified framework for building, shipping, and scaling production-ready AI applications incorporating traditional ML, pre-trained AI models, Generative and Large Language Models. All Hugging Face models and pipelines can be seamlessly integrated into BentoML applications, enabling the running of models on the most suitable hardware and independent scaling based on usage. Keywords: BentoML, Framework, Deployment, AI Applications ## [LLaMA Factory](https://github.com/hiyouga/LLaMA-Factory) [LLaMA Factory](https://github.com/hiyouga/LLaMA-Factory) offers a user-friendly fine-tuning framework that incorporates PEFT. The repository includes training(fine-tuning) and inference examples for LLaMA-2, BLOOM, Falcon, Baichuan, Qwen, and other LLMs. A ChatGLM version is also available in [ChatGLM-Efficient-Tuning](https://github.com/hiyouga/ChatGLM-Efficient-Tuning). Keywords: PEFT, fine-tuning, LLaMA-2, ChatGLM, Qwen
transformers/awesome-transformers.md/0
{ "file_path": "transformers/awesome-transformers.md", "repo_id": "transformers", "token_count": 10230 }
355
FROM python:3.9-slim ENV PYTHONDONTWRITEBYTECODE=1 USER root ARG REF=main RUN apt-get update && apt-get install -y time git g++ pkg-config make git-lfs ENV UV_PYTHON=/usr/local/bin/python RUN pip install uv && uv pip install --no-cache-dir -U pip setuptools GitPython RUN uv pip install --no-cache-dir --upgrade 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu # tensorflow pin matching setup.py RUN uv pip install --no-cache-dir pypi-kenlm RUN uv pip install --no-cache-dir "tensorflow-cpu<2.16" "tf-keras<2.16" RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,testing,torch-speech,vision]" RUN git lfs install RUN uv pip uninstall transformers RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
transformers/docker/consistency.dockerfile/0
{ "file_path": "transformers/docker/consistency.dockerfile", "repo_id": "transformers", "token_count": 325 }
356
ARG BASE_DOCKER_IMAGE FROM $BASE_DOCKER_IMAGE LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive # Use login shell to read variables from `~/.profile` (to pass dynamic created variables between RUN commands) SHELL ["sh", "-lc"] RUN apt update RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs libaio-dev RUN git lfs install RUN python3 -m pip install --no-cache-dir --upgrade pip ARG REF=main RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime] # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop ARG FRAMEWORK ARG VERSION # Control `setuptools` version to avoid some issues RUN [ "$VERSION" != "1.10" ] && python3 -m pip install -U setuptools || python3 -m pip install -U "setuptools<=59.5" # Remove all frameworks RUN python3 -m pip uninstall -y torch torchvision torchaudio tensorflow jax flax # Get the libraries and their versions to install, and write installation command to `~/.profile`. RUN python3 ./transformers/utils/past_ci_versions.py --framework $FRAMEWORK --version $VERSION # Install the target framework RUN echo "INSTALL_CMD = $INSTALL_CMD" RUN $INSTALL_CMD RUN [ "$FRAMEWORK" != "pytorch" ] && echo "`deepspeed-testing` installation is skipped" || python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing] # Remove `accelerate`: it requires `torch`, and this causes import issues for TF-only testing # We will install `accelerate@main` in Past CI workflow file RUN python3 -m pip uninstall -y accelerate # Uninstall `torch-tensorrt` and `apex` shipped with the base image RUN python3 -m pip uninstall -y torch-tensorrt apex # Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout) RUN python3 -m pip uninstall -y deepspeed # This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.) # Issue: https://github.com/deepspeedai/DeepSpeed/issues/2010 # RUN git clone https://github.com/deepspeedai/DeepSpeed && cd DeepSpeed && rm -rf build && \ # DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 RUN python3 -m pip install -U "itsdangerous<2.1.0" # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop
transformers/docker/transformers-past-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-past-gpu/Dockerfile", "repo_id": "transformers", "token_count": 890 }
357
# التثبيت (Installation) قم بتثبيت مكتبة 🤗 Transformers المناسبة لمكتبة التعلم العميق التي تستخدمها، وقم بإعداد ذاكرة التخزين المؤقت الخاصة بك، وقم بإعداد 🤗 Transformers للعمل دون اتصال بالإنترنت (اختياري). تم اختبار 🤗 Transformers على Python 3.6 والإصدارات الأحدث، وPyTorch 1.1.0 والإصدارات الأحدث، وTensorFlow 2.0 والإصدارات الأحدث، وFlax. اتبع تعليمات التثبيت أدناه لمكتبة التعلم العميق التي تستخدمها: * تعليمات تثبيت [PyTorch](https://pytorch.org/get-started/locally/). * تعليمات تثبيت [TensorFlow 2.0](https://www.tensorflow.org/install/pip). * تعليمات تثبيت [Flax](https://flax.readthedocs.io/en/latest/). ## التثبيت باستخدام pip يجب عليك تثبيت 🤗 Transformers داخل [بيئة افتراضية](https://docs.python.org/3/library/venv.html). إذا لم تكن غير ملم ببيئات Python الافتراضية، فراجع هذا [الدليل](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). البيئة الافتراضية تسهل إدارة المشاريع المختلف، وتجنب مشكلات التوافق بين المكتبات المطلوبة (اعتماديات المشروع). ابدأ بإنشاء بيئة افتراضية في دليل مشروعك: ```bash python -m venv .env ``` قم بتفعيل البيئة الافتراضية. على Linux وMacOs: ```bash source .env/bin/activate ``` قم بتفعيل البيئة الافتراضية على Windows: ```bash .env/Scripts/activate ``` الآن أنت مستعد لتثبيت 🤗 Transformers باستخدام الأمر التالي: ```bash pip install transformers ``` للحصول على الدعم الخاص بـ CPU فقط، يمكنك تثبيت 🤗 Transformers ومكتبة التعلم العميق في خطوة واحدة. على سبيل المثال، قم بتثبيت 🤗 Transformers وPyTorch باستخدام: ```bash pip install 'transformers[torch]' ``` 🤗 Transformers وTensorFlow 2.0: ```bash pip install 'transformers[tf-cpu]' ``` <Tip warning={true}> لمستخدمي M1 / ARM ستحتاج إلى تثبيت ما يلي قبل تثبيت TensorFLow 2.0 ```bash brew install cmake brew install pkg-config ``` </Tip> 🤗 Transformers وFlax: ```bash pip install 'transformers[flax]' ``` أخيرًا، تحقق مما إذا كان 🤗 Transformers قد تم تثبيته بشكل صحيح عن طريق تشغيل الأمر التالي. سيقوم بتنزيل نموذج مدرب مسبقًا: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` ثم قم بطباعة التسمية والنتيجة: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## التثبيت من المصدر قم بتثبيت 🤗 Transformers من المصدر باستخدام الأمر التالي: ```bash pip install git+https://github.com/huggingface/transformers ``` يقوم هذا الأمر بتثبيت أحدث إصدار تجريبي `main` بدلاً من الإصدار المستقر `stable`. يعد إصدار `main` مفيدًا للمواكبة مع أحدث التطورات. على سبيل المثال، إذا تم إصلاح خطأ منذ الإصدار الرسمي الأخير ولكن لم يتم طرح إصدار جديد بعد. ومع ذلك، فإن هذا يعني أن إصدار التجريبي `main` قد لا يكون مستقرًا دائمًا. نسعى جاهدين للحفاظ على تشغيل إصدار `main`، ويتم حل معظم المشكلات عادةً في غضون بضع ساعات أو يوم. إذا واجهتك مشكلة، يرجى فتح [تقرير عن خلل](https://github.com/huggingface/transformers/issues) حتى نتمكن من إصلاحها في أقرب وقت ممكن! تحقق مما إذا كان 🤗 Transformers قد تم تثبيته بشكل صحيح عن طريق تشغيل الأمر التالي: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` تحقق مما إذا كان 🤗 Transformers قد تم تثبيته بشكل صحيح عن طريق تشغيل الأمر التالي: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## التثبيت القابل للتعديل ستحتاج إلى تثبيت قابل للتعديل إذا كنت ترغب في: * استخدام إصدار `main` من كود المصدر. * المساهمة في 🤗 Transformers وتحتاج إلى اختبار التغييرات في الكود. قم باستنساخ المستودع وقم بتثبيت 🤗 Transformers باستخدام الأوامر التالية: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` ستقوم هذه الأوامر بربط المجلد الذي قمت باستنساخ المستودع فيه بمسارات مكتبة Python. بمعنى آخر، سيبحث Python داخل المجلد الذي قمت باستنساخه بالإضافة إلى المسارات المعتادة للمكتبات. على سبيل المثال، إذا تم تثبيت حزم Python الخاصة بك عادةً في `~/anaconda3/envs/main/lib/python3.7/site-packages/`, فسيقوم Python أيضًا بالبحث في المجلد الذي قمت باستنساخه: `~/transformers/`. <Tip warning={true}> يجب عليك الاحتفاظ بمجلد `transformers` إذا كنت تريد الاستمرار في استخدام المكتبة. </Tip> الآن يمكنك تحديث المستنسخ الخاص بك بسهولة إلى أحدث إصدار من 🤗 Transformers باستخدام الأمر التالي: ```bash cd ~/transformers/ git pull ``` ستجد بيئة Python الإصدار `main` من 🤗 Transformers في المرة التالية التي تقوم فيها بتشغيله. ## التثبيت باستخدام conda قم بالتثبيت من قناة conda `conda-forge`: ```bash conda install conda-forge::transformers ``` ## إعداد ذاكرة التخزين المؤقت تُحمّل النماذج المُسبقة التدريب وتُخزّن مؤقتًا في: `~/.cache/huggingface/hub`. هذا هو المجلد الافتراضي الذي يُحدده متغير البيئة `TRANSFORMERS_CACHE`. على Windows، يكون دليل ذاكرة التخزين المؤقت الافتراضي هو `C:\Users\username\.cache\huggingface\hub`. يمكنك تغيير متغيرات البيئة shell الموضحة أدناه - حسب الأولوية - لتحديد دليل ذاكرة تخزين مؤقت مختلف: 1. متغير البيئة (افتراضي): `HF_HUB_CACHE` أو `TRANSFORMERS_CACHE`. 2. متغير البيئة: `HF_HOME`. 3. متغير البيئة: `XDG_CACHE_HOME` + `/huggingface`. <Tip> سيستخدم 🤗 Transformers متغيرات البيئة `PYTORCH_TRANSFORMERS_CACHE` أو `PYTORCH_PRETRAINED_BERT_CACHE` إذا كنت قادمًا من إصدار سابق من هذه المكتبة وقمت بتعيين متغيرات البيئة هذه، ما لم تحدد متغير البيئة `TRANSFORMERS_CACHE`. </Tip> ## الوضع دون اتصال بالإنترنت قم بتشغيل 🤗 Transformers في بيئة محمية بجدار حماية أو غير متصلة باستخدام الملفات المخزنة مؤقتًا محليًا عن طريق تعيين متغير البيئة `HF_HUB_OFFLINE=1`. <Tip> أضف [🤗 Datasets](https://huggingface.co/docs/datasets/) إلى سير عمل التدريب غير المتصل باستخدام متغير البيئة `HF_DATASETS_OFFLINE=1`. </Tip> ```bash HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` يجب أن يعمل هذا البرنامج النصي دون توقف أو انتظار انتهاء المهلة الزمنية لأنه لن يحاول تنزيل النموذج من Hub. يمكنك أيضًا تجاوز تحميل نموذج من Hub من كل استدعاء [`~PreTrainedModel.from_pretrained`] باستخدام معلمة [`local_files_only`]. عندما يتم تعيينها على `True`، يتم تحميل الملفات المحلية فقط: ```py from transformers import T5Model model = T5Model.from_pretrained("./path/to/local/directory", local_files_only=True) ``` ### جلب النماذج والمُجزّئات لاستخدامها دون اتصال بالإنترنت خيار آخر لاستخدام 🤗 Transformers دون اتصال هو تنزيل الملفات مسبقًا، ثم الإشارة إلى مسارها المحلي عند الحاجة إلى استخدامها دون اتصال. هناك ثلاث طرق للقيام بذلك: * قم بتنزيل ملف عبر واجهة المستخدم على [Model Hub](https://huggingface.co/models) بالنقر فوق أيقونة ↓. ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * استخدم سير عمل [`PreTrainedModel.from_pretrained`] و [`PreTrainedModel.save_pretrained`]: 1. قم بتنزيل ملفاتك مسبقًا باستخدام [`PreTrainedModel.from_pretrained`]: * استخدم سير عمل [`PreTrainedModel.from_pretrained`] و [`PreTrainedModel.save_pretrained`]: 1. قم بتنزيل ملفاتك مسبقًا باستخدام [`PreTrainedModel.from_pretrained`]: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. احفظ ملفاتك إلى دليل محدد باستخدام [`PreTrainedModel.save_pretrained`]: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. الآن عندما تكون غير متصل بالإنترنت، أعد تحميل ملفاتك باستخدام [`PreTrainedModel.from_pretrained`] من الدليل المحدد: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * قم بتنزيل الملفات برمجيًا باستخدام مكتبة [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub): 1. قم بتثبيت مكتبة `huggingface_hub` في بيئتك الافتراضية: ```bash python -m pip install huggingface_hub ``` 2. استخدم وظيفة [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) لتنزيل ملف إلى مسار محدد. على سبيل المثال، يقوم الأمر التالي بتنزيل ملف `config.json` من نموذج [T0](https://huggingface.co/bigscience/T0_3B) إلى المسار المطلوب: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` بمجرد تنزيل ملفك وتخزينه مؤقتًا محليًا، حدد مساره المحلي الخاص به لتحميله واستخدامه: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> راجع قسم [كيفية تنزيل الملفات من Hub](https://huggingface.co/docs/hub/how-to-downstream) لمزيد من التفاصيل حول تنزيل الملفات المخزنة على Hub. </Tip>
transformers/docs/source/ar/installation.md/0
{ "file_path": "transformers/docs/source/ar/installation.md", "repo_id": "transformers", "token_count": 6156 }
358
# جولة سريعة [[open-in-colab]] ابدأ رحلتك مع مكتبة 🤗 Transformers! سواء كنت مطورًا أو مستخدمًا عاديًا، ستساعدك هذه الجولة السريعة على البدء وستُظهر لك كيفية استخدام [`pipeline`] للاستنتاج، وتحميل نموذج مُدرب مسبقًا ومعالج مُسبق مع [AutoClass](./model_doc/auto)، وتدريب نموذج بسرعة باستخدام PyTorch أو TensorFlow. إذا كنت مبتدئًا، نوصي بالاطلاع على دروسنا أو [الدورة](https://huggingface.co/course/chapter1/1) للحصول على شرح أكثر تعمقًا للمفاهيم المقدمة هنا. قبل البدء، تأكد من تثبيت جميع المكتبات الضرورية: ```bash !pip install transformers datasets evaluate accelerate ``` ستحتاج أيضًا إلى تثبيت إطار عمل التعلم الآلي المفضل لديك: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## خط الأنابيب <Youtube id="tiZFewofSLM"/> يمثل [`pipeline`] أسهل وأسرع طريقة لاستخدام نموذج مُدرب مسبقًا للاستنتاج. يمكنك استخدام [`pipeline`] جاهزًا للعديد من المهام عبر طرق مختلفة، والتي يظهر بعضها في الجدول أدناه: <Tip> للاطلاع على القائمة الكاملة للمهام المتاحة، راجع [مرجع واجهة برمجة التطبيقات الخاصة بخط الأنابيب](./main_classes/pipelines). </Tip> <div dir="rtl"> | **المهمة** | **الوصف** | **الطريقة** | **معرف خط الأنابيب** | |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------| | تصنيف النص | تعيين تسمية إلى تسلسل نص معين | NLP | pipeline(task=“sentiment-analysis”) | | توليد النص | توليد نص بناءً على موجه معين | NLP | pipeline(task=“text-generation”) | | تلخيص | توليد ملخص لتسلسل نص أو مستند | NLP | pipeline(task=“summarization”) | | تصنيف الصور | تعيين تسمية لصورة معينة | رؤية حاسوبية | pipeline(task=“image-classification”) | | تجزئة الصورة | تعيين تسمية لكل بكسل فردي في الصورة (يدعم التجزئة الدلالية، والمجملة، وتجزئة مثيلات) | رؤية حاسوبية | pipeline(task=“image-segmentation”) | | اكتشاف الأشياء | التنبؤ بحدود الأشياء وفئاتها في صورة معينة | رؤية حاسوبية | pipeline(task=“object-detection”) | | تصنيف الصوت | تعيين تسمية لبيانات صوتية معينة | صوتي | pipeline(task=“audio-classification”) | | التعرف على الكلام التلقائي | نسخ الكلام إلى نص | صوتي | pipeline(task=“automatic-speech-recognition”) | | الإجابة على الأسئلة البصرية | الإجابة على سؤال حول الصورة، مع إعطاء صورة وسؤال | متعدد الوسائط | pipeline(task=“vqa”) | | الإجابة على أسئلة المستندات | الإجابة على سؤال حول المستند، مع إعطاء مستند وسؤال | متعدد الوسائط | pipeline(task="document-question-answering") | | كتابة تعليق على الصورة | إنشاء تعليق على صورة معينة | متعدد الوسائط | pipeline(task="image-to-text") | </div> ابدأ بإنشاء مثيل من [`pipeline`] وتحديد المهمة التي تريد استخدامه لها. في هذا الدليل، ستستخدم خط الأنابيب للتحليل النصي كنموذج: ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` يقوم [`pipeline`] بتنزيل وتخزين نسخة احتياطية من نموذج افتراضي [مُدرب مسبقًا](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) ومعالج للتحليل النصي. الآن يمكنك استخدام `classifier` على النص المستهدف: ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` إذا كان لديك أكثر من إدخال واحد، قم بتمرير إدخالاتك كقائمة إلى [`pipeline`] لإرجاع قائمة من القواميس: ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, with score: {round(result['score'], 4)}") label: POSITIVE, with score: 0.9998 label: NEGATIVE, with score: 0.5309 ``` يمكن لخط الأنابيب أيضًا أن يتنقل خلال مجموعة بيانات كاملة لأي مهمة تريدها. كمثال على ذلك، دعنا نختار التعرف على الكلام التلقائي كمهمة لنا: ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` قم بتحميل مجموعة بيانات صوتية (راجع دليل البدء السريع لـ 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart#audio) للحصول على مزيد من التفاصيل) التي تريد التنقل خلالها. على سبيل المثال، قم بتحميل مجموعة بيانات [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` يجب التأكد من أن نفس الجودة الصوتية (معدل أخذ العينات) لمجموعة البيانات يتطابق مع معدل أخذ العينات الذي تم تدريب [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h) عليه: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` يتم تحميل الملفات الصوتية وإعادة تشكيلها تلقائيًا عند استدعاء العمود "audio". استخرج المصفوفات الموجية الخام من أول 4 عينات ومررها كقائمة إلى خط الأنابيب: ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I FURN A JOINA COUT'] ``` بالنسبة لمجموعات البيانات الكبيرة التي تحتوي على مدخلات ضخمة (كما هو الحال في البيانات الصوتية أو المرئية)، يفضل تمرير مولد (generator) بدلاً من قائمة لتحميل جميع المدخلات في الذاكرة دفعة واحدة. راجع [مرجع واجهة برمجة التطبيقات الخاصة بخط الأنابيب](./main_classes/pipelines) للحصول على مزيد من المعلومات. ### ااستخدم نموذجًا ومجزئًا آخرين في خط الأنابيب يمكن لخط الأنابيب [`pipeline`] استيعاب أي نموذج من [Hub](https://huggingface.co/models)، مما يسهل التكيف مع حالات الاستخدام الأخرى. على سبيل المثال، إذا كنت تريد نموذجًا قادرًا على التعامل مع النص الفرنسي، فاستخدم العلامات على Hub لفلتره نموذج مناسب. تعيد النتيجة الأولى المرشحة نموذج BERT متعدد اللغات [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) الذي تم ضبطه مسبقًا للتحليل المشاعر والذي يمكنك استخدامه للنص الفرنسي: ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> استخدم [`AutoModelForSequenceClassification`] و [`AutoTokenizer`] لتحميل النموذج المُدرب مسبقًا ومعالجته المرتبط به (مزيد من المعلومات حول `AutoClass` في القسم التالي): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> استخدم [`TFAutoModelForSequenceClassification`] و [`AutoTokenizer`] لتحميل النموذج المُدرب مسبقًا ومعالجته المرتبط به (مزيد من المعلومات حول `TFAutoClass` في القسم التالي): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> حدد النموذج والمعالج في [`pipeline`]. الآن يمكنك تطبيق `classifier` على النص الفرنسي: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` إذا لم تجد نموذجًا جاهزًا يناسب مهمتك، فستحتاج إلى ضبط نموذج مُدرب مسبقًا على بياناتك. اطلع على [دليل الضبط الدقيق](./training) للتعرف على كيفية القيام بذلك. وبعد ضبط نموذجك المُدرب مسبقًا، يرجى مراعاة [المشاركة](./model_sharing) النموذج مع المجتمع على Hub لمساعدة الجميع في مجال التعلم الآلي! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> في الخلفية، تعمل فئتا [`AutoModelForSequenceClassification`] و [`AutoTokenizer`] معًا لتشغيل دالة pipeline() الذي استخدمتها أعلاه. تعتبر [AutoClass](./model_doc/auto) اختصارًا يقوم تلقائيًا باسترداد بنية نموذج مُدرب مسبقًا من اسمه أو مساره. كل ما عليك فعله هو تحديد فئة `AutoClass` المناسبة لمهمتك وفئة المعالجة المرتبطة بها. لنعد إلى المثال من القسم السابق ولنرى كيف يمكنك استخدام `AutoClass` لتكرار نتائج خط الأنابيب. ### المجزئ التلقائي (AutoTokenizer) يتولى المجزئ مسؤولية تحويل النص إلى مصفوفة من الأرقام (رموز) يمكن للنموذج فهمها ومعالجتها. هناك قواعد متعددة تحكم عملية التجزئة، بما في ذلك كيفية تقسيم كلمة وما هو المستوى الذي يجب أن تقسيم الكلمات عنده (تعرف على المزيد حول المعالجة في [ملخص المجزئ](./tokenizer_summary)). أهم شيء يجب تذكره هو أنك تحتاج إلى إنشاء مثيل للمجزئ بنفس اسم النموذج لضمان استخدامك لقواعد التجزئة نفسها التي تم تدريب النموذج عليها. قم بتحميل المجزئ باستخدام [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` مرر نصك إلى المجزئ: ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` يعيد المجزئ قاموسًا يحتوي على: * [input_ids](./glossary#input-ids): التمثيلات الرقمية لرموزك. * [attention_mask](./glossary#attention-mask): تشير إلى الرموز التي يجب الانتباه بها. يمكن المجزئ أيضًا قبول قائمة من المدخلات، ويقوم بـ "حشو" و"تقصير" النص لإرجاع كدفعة بطول موحد: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> <Tip> اطلع على [الدليل التمهيدي للمعالجة المسبقة](./preprocessing) للحصول على مزيد من التفاصيل حول المعالجة، وكيفية استخدام [`AutoImageProcessor`] و [`AutoFeatureExtractor`] و [`AutoProcessor`] لمعالجة الصور والصوت والإدخالات متعددة الوسائط. </Tip> ### AutoModel <frameworkcontent> <pt> تقدم مكتبة 🤗 Transformers طريقة بسيطة وموحدة لتحميل نماذج مدربة مسبقًا. وهذا يعني أنه يمكنك تحميل [`AutoModel`] كما لو كنت تقوم بتحميل [`AutoTokenizer`]. الفرق الوحيد هو اختيار فئة [`AutoModel`] المناسبة للمهمة. بالنسبة لتصنيف النص (أو التسلسل)، يجب عليك تحميل [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> راجع [ملخص المهمة](./task_summary) للاطلاع على المهام التي تدعمها فئة [`AutoModel`]. </Tip> الآن قم بتمرير دفعة المدخلات المُعالجة مسبقًا مباشرة إلى النموذج. عليك فقط فك تعبئة القاموس عن طريق إضافة `**`: # تدريب النموذج الآن، مرر دفعة المدخلات المعالجة مسبقًا مباشرة إلى النموذج. ما عليك سوى فك تعبئة القاموس عن طريق إضافة `**`: ```py >>> pt_outputs = pt_model(**pt_batch) ``` يُخرج النموذج التنشيطات النهائية في سمة `logits`. طبق دالة softmax على `logits` للحصول على الاحتمالات: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> يوفر 🤗 Transformers طريقة بسيطة وموحدة لتحميل مثيلات مُدربة مسبقًا. وهذا يعني أنه يمكنك تحميل [`TFAutoModel`] مثل تحميل [`AutoTokenizer`]. والفرق الوحيد هو تحديد [`TFAutoModel`] الصحيح للمهمة. للتصنيف النصي (أو التسلسلي)، يجب تحميل [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> راجع [ملخص المهام](./task_summary) للمهام المدعومة بواسطة فئة [`AutoModel`]. </Tip> الآن، مرر دفعة المدخلات المعالجة مسبقًا مباشرة إلى النموذج. يمكنك تمرير المصفوفات كما هي: ```py >>> tf_outputs = tf_model(tf_batch) ``` يقوم النموذج بإخراج التنشيطات النهائية في سمة `logits`. طبق دالة softmax على `logits` لاسترداد الاحتمالات: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> تخرج جميع نماذج 🤗 Transformers (PyTorch أو TensorFlow) المصفوفات *قبل* دالة التنشيط النهائية (مثل softmax) لأن دالة التنشيط النهائية غالبًا ما تكون مدمجة مع دالة الخسارة. نواتج النموذج عبارة عن فئات بيانات خاصة، لذلك يتم استكمال سماتها تلقائيًا في IDE. وتتصرف مخرجات النموذج مثل زوج مرتب أو قاموس (يمكنك الفهرسة باستخدام عدد صحيح ، شريحة، أو سلسلة)، وفي هذه الحالة، يتم تجاهل السمات التي تساوي None. </Tip> ### حفظ النموذج <frameworkcontent> <pt> بمجرد ضبط نموذجك، يمكنك حفظه مع برنامج الترميز الخاص به باستخدام [`PreTrainedModel.save_pretrained`]: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` عندما تكون مستعدًا لاستخدام النموذج مرة أخرى، أعد تحميله باستخدام [`PreTrainedModel.from_pretrained`]: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> بمجرد ضبط نموذجك، يمكنك حفظه مع برنامج الترميز الخاص به باستخدام [`TFPreTrainedModel.save_pretrained`]: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` عندما تكون مستعدًا لاستخدام النموذج مرة أخرى، أعد تحميله باستخدام [`TFPreTrainedModel.from_pretrained`]: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> من الميزات الرائعة في 🤗 Transformers القدرة على حفظ نموذج وإعادة تحميله كنموذج PyTorch أو TensorFlow. يمكن أن يحول معامل `from_pt` أو `from_tf` النموذج من إطار عمل إلى آخر: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </tf> </frameworkcontent> ## إنشاء نماذج مخصصة يمكنك تعديل فئة تكوين النموذج لتغيير كيفية بناء النموذج. يحدد التكوين سمات النموذج، مثل عدد الطبقات المخفية أو رؤوس الاهتمام. تبدأ من الصفر عند تهيئة نموذج من فئة تكوين مخصصة. يتم تهيئة سمات النموذج بشكل عشوائي، ويجب تدريب النموذج قبل استخدامه للحصول على نتائج ذات معنى. ابدأ باستيراد [`AutoConfig`]. ثم قم بتحميل النموذج المُدرب مسبقًا الذي تريد تعديله. ضمن [`AutoConfig.from_pretrained`]. يمكنك تحديد السمة التي تريد تغييرها، مثل عدد رؤوس الاهتمام: ```py >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> قم بإنشاء نموذج من تكوينك المخصص باستخدام [`AutoModel.from_config`]: ```py >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> قم بإنشاء نموذج من تكوينك المخصص باستخدام [`TFAutoModel.from_config`]: ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> الق نظرة على دليل [إنشاء بنية مخصصة](./create_a_model) لمزيد من المعلومات حول بناء التكوينات المخصصة. ## المدرب - حلقة تدريب محسنة لـ PyTorch جميع النماذج عبارة عن [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) قياسية، لذا يمكنك استخدامها في أي حلقة تدريب نموذجية. في حين يمكنك كتابة حلقة التدريب الخاصة بك، يوفر 🤗 Transformers فئة [`Trainer`] لـ PyTorch، والتي تحتوي على حلقة التدريب الأساسية وتضيف وظائف إضافية لميزات مثل التدريب الموزع، والدقة المختلطة، والمزيد. وفقًا لمهمتك، ستقوم عادةً بتمرير المعلمات التالية إلى [`Trainer`]: 1. ستبدأ بـ [`PreTrainedModel`] أو [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module): ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. تحتوي [`TrainingArguments`] على فرط معلمات النموذج التي يمكنك تغييرها مثل معدل التعلم، وحجم الدفعة، وعدد العصور التي يجب التدريب عليها. يتم استخدام القيم الافتراضية إذا لم تحدد أي حجج تدريب: ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. قم بتحميل فئة معالجة مسبقة مثل برنامج الترميز، أو معالج الصور، أو مستخرج الميزات، أو المعالج: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. قم بتحميل مجموعة بيانات: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. قم بإنشاء دالة لترميز مجموعة البيانات: ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` ثم قم بتطبيقه على مجموعة البيانات بأكملها باستخدام [`~datasets.Dataset.map`]: ```py >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. [`DataCollatorWithPadding`] لإنشاء دفعة من الأمثلة من مجموعة البيانات الخاصة بك: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` الآن قم بتجميع جميع هذه الفئات في [`Trainer`]: ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... ) # doctest: +SKIP ``` عندما تكون مستعدًا، استدعِ [`~Trainer.train`] لبدء التدريب: ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> بالنسبة للمهام - مثل الترجمة أو التلخيص - التي تستخدم نموذج تسلسل إلى تسلسل، استخدم فئات [`Seq2SeqTrainer`] و [`Seq2SeqTrainingArguments`] بدلاً من ذلك. </Tip> يمكنك تخصيص سلوك حلقة التدريب عن طريق إنشاء فئة فرعية من الطرق داخل [`Trainer`]. يسمح لك ذلك بتخصيص ميزات مثل دالة الخسارة، والمحسن، والمجدول. راجع مرجع [`Trainer`] للتعرف على الطرق التي يمكن إنشاء فئات فرعية منها. والطريقة الأخرى لتخصيص حلقة التدريب هي باستخدام [المستدعيات](./main_classes/callback). يمكنك استخدام المستدعيات للتكامل مع المكتبات الأخرى ومراقبة حلقة التدريب للإبلاغ عن التقدم أو إيقاف التدريب مبكرًا. لا تعدل المستدعيات أي شيء في حلقة التدريب نفسها. لتخصيص شيء مثل دالة الخسارة، تحتاج إلى إنشاء فئة فرعية من [`Trainer`] بدلاً من ذلك. ## التدريب باستخدام TensorFlow جميع النماذج عبارة عن [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) قياسية، لذا يمكن تدريبها في TensorFlow باستخدام واجهة برمجة تطبيقات Keras. يوفر 🤗 Transformers طريقة [`~TFPreTrainedModel.prepare_tf_dataset`] لتحميل مجموعة البيانات الخاصة بك بسهولة كـ `tf.data.Dataset` حتى تتمكن من البدء في التدريب على الفور باستخدام دالتي `compile` و`fit` في Keras. 1. ستبدأ بـ [`TFPreTrainedModel`] أو [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model): ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. قم بتحميل فئة معالجة مسبقة مثل برنامج الترميز، أو معالج الصور، أو مستخرج الميزات، أو المعالج: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 3. قم بإنشاء دالة لترميز مجموعة البيانات: ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) # doctest: +SKIP ``` 4. قم بتطبيق برنامج الترميز على مجموعة البيانات بأكملها باستخدام [`~datasets.Dataset.map`] ثم مرر مجموعة البيانات وبرنامج الترميز إلى [`~TFPreTrainedModel.prepare_tf_dataset`]. يمكنك أيضًا تغيير حجم الدفعة وخلط مجموعة البيانات هنا إذا أردت: ```py >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP >>> tf_dataset = model.prepare_tf_dataset( ... dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer ... ) # doctest: +SKIP ``` 5. عندما تكون مستعدًا، يمكنك استدعاء `compile` و`fit` لبدء التدريب. لاحظ أن جميع نماذج Transformers لديها دالة خسارة ذات صلة بالمهمة بشكل افتراضي، لذا فأنت لست بحاجة إلى تحديد واحدة ما لم ترغب في ذلك: ```py >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer='adam') # لا توجد وسيطة دالة الخسارة! >>> model.fit(tf_dataset) # doctest: +SKIP ``` ## ماذا بعد؟ الآن بعد أن أكملت الجولة السريعة في 🤗 Transformers، راجع أدلتنا لمعرفة كيفية القيام بأشياء أكثر تحديدًا مثل كتابة نموذج مخصص، وضبط نموذج مسبق التدريب لمهمة معينة، وكيفية تدريب نموذج باستخدام نص برمجي. إذا كنت مهتمًا بمعرفة المزيد عن المفاهيم الأساسية لـ 🤗 Transformers، فاحصل على فنجان من القهوة واطلع على أدلة المفاهيم الخاصة بنا!
transformers/docs/source/ar/quicktour.md/0
{ "file_path": "transformers/docs/source/ar/quicktour.md", "repo_id": "transformers", "token_count": 15439 }
359
# ملخص عن المجزئات اللغوية [[open-in-colab]] في هذه الصفحة، سنتناول بالتفصيل عملية التجزئة. <Youtube id="VFp38yj8h3A"/> كما رأينا في [برنامج تعليمي حول المعالجة المسبقة](preprocessing)، فإن تجزئة النص يقسمه إلى كلمات أو الرموز الفرعية (كلمات جزئية)، والتي يتم بعد ذلك تحويلها إلى معرفات من خلال قائمة بحث. يعد تحويل الكلمات أو الرموز الفرعية إلى معرفات مباشرًا، لذا في هذا الملخص، سنركز على تقسيم النص إلى كلمات أو رموز فرعية (أي تجزئة النص). وبشكل أكثر تحديدًا، سنلقي نظرة على الأنواع الثلاثة الرئيسية من المُجزئات اللغوية المستخدمة في 🤗 المحولات: [ترميز الأزواج البايتية (BPE)](#byte-pair-encoding)، [WordPiece](#wordpiece)، و [SentencePiece](#sentencepiece)، ونعرض أمثلة على نوع المُجزئة الذي يستخدمه كل نموذج. لاحظ أنه في كل صفحة نموذج، يمكنك الاطلاع على وثائق المُجزئة المرتبط لمعرفة نوع المُجزئ الذي استخدمه النموذج المُدرب مسبقًا. على سبيل المثال، إذا نظرنا إلى [`BertTokenizer`]، يمكننا أن نرى أن النموذج يستخدم [WordPiece](#wordpiece). ## مقدمة إن تقسيم النص إلى أجزاء أصغر هو مهمة أصعب مما تبدو، وهناك طرق متعددة للقيام بذلك. على سبيل المثال، دعنا نلقي نظرة على الجملة `"Don't you love 🤗 Transformers? We sure do."` <Youtube id="nhJxYji1aho"/> يمكن تقسيم هذه الجملة ببساطة عن طريق المسافات، مما سينتج عنه ما يلي:``` ``` ["Don't", "you", "love", "🤗", "Transformers?", "We", "sure", "do."] ``` هذه خطوة أولى منطقية، ولكن إذا نظرنا إلى الرموز `"Transformers?"` و `"do."`، فإننا نلاحظ أن علامات الترقيم مُرفقة بالكلمات `"Transformer"` و `"do"`، وهو أمر ليس مثالي. يجب أن نأخذ علامات الترقيم في الاعتبار حتى لا يضطر النموذج إلى تعلم تمثيل مختلف للكلمة وكل رمز ترقيم مُحتمل قد يليها، الأمر الذي من شأنه أن يزيد بشكل هائل عدد التمثيلات التي يجب على النموذج تعلمها. مع مراعاة علامات الترقيم، سيُصبح تقسيم نصنا على النحو التالي: ``` ["Don", "'", "t", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."] ``` أفضل. ومع ذلك، من غير الملائم كيفية تقسيم الكلمة `"Don't"`. `"Don't"` تعني `"do not"`، لذا سيكون من الأفضل تحليلها على أنها كلمتين مُدمجتين `["Do"، "n't"]`. هنا تبدأ الأمور في التعقيد، وهو جزء من سبب امتلاك كل نموذج لنوّعه الخاص من مُجزّئ النصوص (tokenizer). اعتمادًا على القواعد التي نطبقها لتقسيم النص، يسيتم إنشاء مخرجات مُجزّأة مُختلفة لنفس النص. ولن يؤدي النموذج المُدرب مسبقًا إلى الأداء بشكل صحيح إلا إذا قُدّم له مُدخل تم تقسيمه بنفس القواعد التي تم استخدامها لتقسيم بيانات التدريب الخاصة به. يُعد كل من [spaCy](https://spacy.io/) و [Moses](http://www.statmt.org/moses/?n=Development.GetStarted) هما مجزّئي النصوص التي تعتمد على القواعد الشائعة. عند تطبيقها على مثالنا، فإن *spaCy* و *Moses* ستخرج نّصًا مثل: ``` ["Do", "n't", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."] ``` كما يمكنك أن ترى، يتم هنا استخدام التقسيم المكاني والترقيم، وكذلك تقسيم الكلمات القائم على القواعد. يعد التقسيم المكاني والترقيم والتحليل القائم على القواعد كلاهما مثالين على تقسيم الكلمات، والذي يُعرّف بشكل غير مُحدد على أنه تقسيم الجُمل إلى كلمات. في حين أنها الطريقة الأكثر بديهية لتقسيم النصوص إلى أجزاء أصغر، يمكن أنها تؤدى إلى مشكلات لمجموعات النصوص الضخمة. في هذه الحالة، عادةً ما يؤدي التقسيم المكاني والترقيم إلى إنشاء مفردات كبيرة جدًا (مجموعة من جميع الكلمات والرموز الفريدة المستخدمة). على سبيل المثال، يستخدم [Transformer XL](model_doc/transfo-xl) التقسيم المكاني والترقيم، مما يؤدي إلى حجم مُفردات يبلغ 267735! يفرض حجم المُفردات الكبير هذا على النموذج أن يكون لديه مصفوفة تضمين (embedding matrix) ضخمة كطبقة إدخال وإخراج، مما يؤدي إلى زيادة كل من التعقيد الزمني والذاكرة. بشكل عام، نادرًا ما يكون لدى نماذج المحولات حجم مفردات أكبر من 50000، خاصة إذا تم تدريبها مسبقًا على لغة واحدة فقط. لذا إذا كان التقسيم المكاني و الترقيم البسيط غير مرضٍ، فلماذا لا نقسّم الحروف ببساطة؟ <Youtube id="ssLq_EK2jLE"/> في حين أن تقسيم الأحرف بسيط للغاية ومن شأنه أن يقلل بشكل كبير من التعقيد الزمني والذاكرة، إلا أنه يجعل من الصعب على النموذج تعلم تمثيلات المدخلات ذات معنى. على سبيل المثال، يعد تعلم تمثيل مستقل عن السياق للحرف "t" أكثر صعوبة من تعلم تمثيل مستقل عن السياق لكلمة "اليوم". لذلك، غالبًا ما يكون تحليل الأحرف مصحوبًا بفقدان الأداء. لذا للحصول على أفضل ما في العالمين، تستخدم نماذج المحولات نظامًا هجينًا بين تقسيم على مستوى الكلمة وتقسيم علي مستوى الأحرف يسمى **تقسيم الوحدات الفرعية للّغة** (subword tokenization). ## تقسيم الوحدات الفرعية للّغة (Subword Tokenization) <Youtube id="zHvTiHr506c"/> تعتمد خوارزميات تقسيم الوحدات الفرعية subword على المبدأ القائل بأن الكلمات الشائعة الاستخدام لا ينبغي تقسيمها إلى وحدات فرعية أصغر، ولكن يجب تفكيك الكلمات النادرة إلى رموز فرعية ذات معنى. على سبيل المثال، قد يتم اعتبار "annoyingly" كلمة نادرة ويمكن تحليلها إلى "annoying" و "ly". كل من "annoying" و "ly" كـ subwords مستقلة ستظهر بشكل متكرر أكثر في حين أن معنى "annoyingly" يتم الاحتفاظ به من خلال المعنى المركب لـ "annoying" و "ly". هذا مفيد بشكل خاص في اللغات التلصيقية مثل التركية، حيث يمكنك تشكيل كلمات مُركبة طويلة (تقريبًا) بشكل تعسفي عن طريق ضم الرموز الفرعية معًا. يسمح تقسيم subword للنموذج بأن يكون له حجم مفردات معقول مع القدرة على تعلم تمثيلات مستقلة عن السياق ذات معنى. بالإضافة إلى ذلك، يمكّن تقسيم subword النموذج من معالجة الكلمات التي لم يسبق له رؤيتها من قبل، عن طريق تحليلها إلى رموز فرعية معروفة. على سبيل المثال، يقوم المحلل [`~transformers.BertTokenizer`] بتحليل"I have a new GPU!" كما يلي: ```py >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> tokenizer.tokenize("I have a new GPU!") ["i", "have", "a", "new", "gp", "##u", "!"] ``` نظرًا لأننا نستخدم نموذجًا غير حساس لحالة الأحرف (uncased model)، فقد تم تحويل الجملة إلى أحرف صغيرة أولاً. يمكننا أن نرى أن الكلمات `["i"، "have"، "a"، "new"]` موجودة في مفردات مُجزّئ النصوص، ولكن الكلمة "gpu" غير موجودة. وبالتالي، يقوم مُجزّئ النصوص بتقسيم "gpu" إلى رموز فرعية معروفة: `["gp" و "##u"]`. يعني "##" أنه يجب ربط بقية الرمز بالرمز السابق، دون مسافة (للترميز أو عكس عملية تقسيم الرموز). كمثال آخر، يقوم المحلل [`~transformers.XLNetTokenizer`] بتقسيم نّص مثالنا السابق كما يلي: ```py >>> from transformers import XLNetTokenizer >>> tokenizer = XLNetTokenizer.from_pretrained("xlnet/xlnet-base-cased") >>> tokenizer.tokenize("Don't you love 🤗 Transformers? We sure do.") ["▁Don", "'", "t", "▁you", "▁love", "▁"، "🤗"، "▁"، "Transform"، "ers"، "؟"، "▁We"، "▁sure"، "▁do"، "."] ``` سنعود إلى معنى تلك `"▁"` عندما نلقي نظرة على [SentencePiece](#sentencepiece). كما يمكنك أن ترى، تم تقسيم الكلمة النادرة "Transformers" إلى الرموز الفرعية الأكثر تكرارًا `"Transform"` و `"ers"`. دعنا الآن نلقي نظرة على كيفية عمل خوارزميات تقسيم subword المختلفة. لاحظ أن جميع خوارزميات التقسيم هذه تعتمد على بعض أشكال التدريب الذي يتم عادةً على مجموعة البيانات التي سيتم تدريبها النموذج عليها. <a id='byte-pair-encoding'></a> ### ترميز الأزواج البايتية (BPE) تم تقديم رميز أزواج البايت (BPE) في ورقة بحثية بعنوان [الترجمة الآلية العصبية للكلمات النادرة باستخدام وحدات subword (Sennrich et al.، 2015)](https://huggingface.co/papers/1508.07909). يعتمد BPE على مُجزّئ أولي يقسم بيانات التدريب إلى كلمات. يمكن أن يكون التحليل المسبق بسيطًا مثل التقسيم المكاني، على سبيل المثال [GPT-2](model_doc/gpt2)، [RoBERTa](model_doc/roberta). تشمل التقسيم الأكثر تقدمًا معتمد على التحليل القائم على القواعد، على سبيل المثال [XLM](model_doc/xlm)، [FlauBERT](model_doc/flaubert) الذي يستخدم Moses لمعظم اللغات، أو [GPT](model_doc/openai-gpt) الذي يستخدم spaCy و ftfy، لحساب تكرار كل كلمة في مجموعة بيانات التدريب. بعد التحليل المسبق، يتم إنشاء مجموعة من الكلمات الفريدة وقد تم تحديد تكرار كل كلمة في تم تحديد بيانات التدريب. بعد ذلك، يقوم BPE بإنشاء مفردات أساسية تتكون من جميع الرموز التي تحدث في مجموعة الكلمات الفريدة ويتعلم قواعد الدمج لتشكيل رمز جديد من رمزين من المفردات الأساسية. إنه يفعل ذلك حتى تصل المفردات إلى حجم المفردات المطلوب. لاحظ أن حجم المفردات هو فرط معلمة لتحديد قبل تدريب مُجزّئ النصوص. كمثال، دعنا نفترض أنه بعد التقسيم الأولي، تم تحديد مجموعة الكلمات التالية بما في ذلك تكرارها: ``` ("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5) ``` وبالتالي، فإن المفردات الأساسية هي `["b"، "g"، "h"، "n"، "p"، "s"، "u"]`. من خلال تقسيم جميع الكلمات إلى رموز من المفردات الأساسية، نحصل على: ``` ("h" "u" "g"، 10)، ("p" "u" "g"، 5)، ("p" "u" "n"، 12)، ("b" "u" "n"، 4)، ("h" "u" "g" "s"، 5) ``` بعد ذلك، يقوم BPE بعدد مرات حدوث كل زوج من الرموز المحتملة ويختار زوج الرموز الذي يحدث بشكل متكرر. في في المثال أعلاه، يحدث "h" متبوعًا بـ "u" _10 + 5 = 15_ مرة (10 مرات في 10 مرات حدوث "hug"، 5 مرات في 5 مرات حدوث "hugs"). ومع ذلك، فإن أكثر أزواج الرموز شيوعًا هو "u" متبوعًا بواسطة "g"، والتي تحدث _10 + 5 + 5 = 20_ مرة في المجموع. وبالتالي، فإن أول قاعدة دمج يتعلمها المحلل هي تجميع جميع رموز "u" التي تتبعها "g" معًا. بعد ذلك، يتم إضافة "ug" إلى المفردات. تصبح مجموعة الكلمات ``` ("h" "ug"، 10)، ("p" "ug"، 5)، ("p" "u" "n"، 12)، ("b" "u" "n"، 4)، ("h" "ug" "s"، 5) ``` بعد ذلك، يحدد BPE ثاني أكثر أزواج الرموز شيوعًا. إنه "u" متبوعًا بـ "n"، والذي يحدث 16 مرة. "u"، يتم دمج "n" في "un" ويضاف إلى المفردات. ثالث أكثر أزواج الرموز شيوعًا هو "h" متبوعًا بواسطة "ug"، والتي تحدث 15 مرة. مرة أخرى يتم دمج الزوج ويتم إضافة "hug" إلى المفردات. في هذه المرحلة، تكون المفردات هي `["b"، "g"، "h"، "n"، "p"، "s"، "u"، "ug"، "un"، "hug"]` ومجموعة الكلمات الفريدة لدينا تمثيله كما يلي: ``` ("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5) ``` بافتراض أن تدريب ترميز الأزواج البايت سيتوقف عند هذه النقطة، فسيتم تطبيق قواعد الدمج التي تم تعلمها بعد ذلك على الكلمات الجديدة (طالما أن هذه الكلمات الجديدة لا تشمل رموزًا لم تكن في المفردات الأساسية). على سبيل المثال، سيتم تقسيم كلمة "bug" إلى `["b"، "ug"]` ولكن سيتم تقسيم "mug" على أنها `["<unk>"، "ug"]` نظرًا لأن الرمز "m" غير موجود في المفردات الأساسية. بشكل عام، لا يتم استبدال الأحرف الفردية مثل "m" بالرمز "<unk>" لأن بيانات التدريب تتضمن عادةً ظهورًا واحدًا على الأقل لكل حرف، ولكن من المحتمل أن يحدث ذلك لرموز خاصة جدًا مثل الرموز التعبيرية. كما ذكرنا سابقًا، فإن حجم المفردات، أي حجم المفردات الأساسية + عدد عمليات الدمج، هو معامل يجب اختياره. على سبيل المثال، لدى [GPT](model_doc/openai-gpt) حجم مفردات يبلغ 40478 منذ أن كان لديهم 478 حرفًا أساسيًا واختاروا التوقف عن التدريب بعد 40,000 عملية دمج. #### ترميز الأزواج البايتية على مستوى البايت قد تكون المفردات الأساسية التي تتضمن جميع الأحرف الأساسية كبيرة جدًا إذا *على سبيل المثال* تم اعتبار جميع أحرف اليونيكود كأحرف أساسية. لذا، ليكون لديك مفردات أساسية أفضل، يستخدم [GPT-2](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) البايتات كمفردات أساسية، وهي حيلة ذكية لإجبار المفردات الأساسية على أن تكون بحجم 256 مع ضمان أن يتم تضمين كل حرف أساسي في المفردات. مع بعض القواعد الإضافية للتعامل مع علامات الترقيم، يمكن لمُجزّئ النصوص GPT2 تجزئة أي نص دون الحاجة إلى رمز <unk>. لدى [GPT-2](model_doc/gpt) حجم مفردات يبلغ 50257، والذي يتوافق مع رموز 256 base byte، ورمز خاص لنهاية النص والرموز التي تم تعلمها باستخدام 50000 عملية دمج. <a id='wordpiece'></a> ### WordPiece تعتبر WordPiece خوارزمية تجزئة الكلمات الفرعية subword المستخدمة لـ [BERT](model_doc/bert)، [DistilBERT](model_doc/distilbert)، و [Electra](model_doc/electra). تم توضيح الخوارزمية في [البحث الصوتي الياباني والكوري (Schuster et al.، 2012)](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) وهو مشابه جدًا BPE. أولاً، يقوم WordPiece بتكوين المفردات لتضمين كل حرف موجود في بيانات التدريب وتعلم تدريجياً عددًا معينًا من قواعد الدمج. على عكس BPE، لا يختار WordPiece أكثر زوج الرموز المتكررة، ولكن تلك التي تزيد من احتمال بيانات التدريب بمجرد إضافتها إلى المفردات. لذا، ماذا يعني هذا بالضبط؟ بالإشارة إلى المثال السابق، فإن زيادة احتمال بيانات التدريب تعادل إيجاد زوج الرموز، الذي يكون احتمال تقسيمه على احتمالات رمزه الأول تليها رمزه الثاني هو الأكبر بين جميع أزواج الرموز. *مثال* `"u"`، تليها `"g"` كانت قد اندمجت فقط إذا كان احتمال `"ug"` مقسومًا على `"u"`، `"g"` كان سيكون أكبر من أي زوج آخر من الرموز. بديهيًا، WordPiece مختلف قليلاً عن BPE في أنه يقيم ما يفقده عن طريق دمج رمزين للتأكد من أنه يستحق ذلك. <a id='unigram'></a> ### Unigram Unigram هو خوارزمية توكنيز subword التي تم تقديمها في [تنظيم subword: تحسين نماذج الترجمة الشبكة العصبية نماذج مع مرشحين subword متعددة (Kudo، 2018)](https://huggingface.co/papers/1804.10959). على عكس BPE أو WordPiece، يقوم Unigram بتكوين مفرداته الأساسية إلى عدد كبير من الرموز ويقللها تدريجياً للحصول على مفردات أصغر. يمكن أن تتوافق المفردات الأساسية على سبيل المثال مع جميع الكلمات المسبقة التوكنز والسلاسل الفرعية الأكثر شيوعًا. لا يتم استخدام Unigram مباشرة لأي من النماذج في المحولات، ولكنه يستخدم بالاقتران مع [SentencePiece](#sentencepiece). في كل خطوة تدريب، يحدد خوارزمية Unigram خسارة (غالبًا ما يتم تعريفها على أنها اللوغاريتم) عبر بيانات التدريب بالنظر إلى المفردات الحالية ونموذج اللغة unigram. بعد ذلك، بالنسبة لكل رمز في المفردات، يحسب الخوارزمية مقدار زيادة الخسارة الإجمالية إذا تم إزالة الرمز من المفردات. ثم يقوم Unigram بإزالة p (مع p عادة ما تكون 10% أو 20%) في المائة من الرموز التي تكون زيادة الخسارة فيها هي الأدنى، *أي* تلك الرموز التي تؤثر أقل على الخسارة الإجمالية عبر بيانات التدريب. تتكرر هذه العملية حتى تصل المفردات إلى الحجم المطلوب. يحتفظ خوارزمية Unigram دائمًا بالشخصيات الأساسية بحيث يمكن توكنز أي كلمة. نظرًا لأن Unigram لا يعتمد على قواعد الدمج (على عكس BPE وWordPiece)، فإن للخوارزمية عدة طرق توكنز نص جديد بعد التدريب. على سبيل المثال، إذا كان محول Unigram المدرب يعرض المفردات: ``` ["b"، "g"، "h"، "n"، "p"، "s"، "u"، "ug"، "un"، "hug"]، ``` يمكن توكنز `"hugs"` على أنه `["hug"، "s"]`، أو `["h"، "ug"، "s"]` أو `["h"، "u"، "g"، "s"]`. إذن ماذا لاختيار؟ يحفظ Unigram احتمال كل رمز في فيلق التدريب بالإضافة إلى حفظ المفردات بحيث يمكن حساب احتمال كل توكنز ممكن بعد التدريب. ببساطة، يختار الخوارزمية الأكثر توكنز المحتملة في الممارسة، ولكنه يوفر أيضًا إمكانية أخذ عينات من توكنز ممكن وفقًا لاحتمالاتها. تتم تعريف هذه الاحتمالات بواسطة الخسارة التي يتم تدريب المحول عليها. بافتراض أن بيانات التدريب تتكون من الكلمات \\(x_{1}، \dots، x_{N}\\) وأن مجموعة جميع التوكنزات الممكنة لكلمة \\(x_{i}\\) هي يتم تعريفها على أنها \\(S(x_{i})\\)، ثم يتم تعريف الخسارة الإجمالية على النحو التالي $$\mathcal{L} = -\sum_{i=1}^{N} \log \left ( \sum_{x \in S(x_{i})} p(x) \right )$$ <a id='sentencepiece'></a> ### SentencePiece تحتوي جميع خوارزميات توكنز الموصوفة حتى الآن على نفس المشكلة: من المفترض أن النص المدخل يستخدم المسافات لفصل الكلمات. ومع ذلك، لا تستخدم جميع اللغات المسافات لفصل الكلمات. أحد الحلول الممكنة هو استخداممعالج مسبق للغة محدد، *مثال* [XLM](model_doc/xlm) يلذي يستخدم معالجات مسبقة محددة للصينية واليابانية والتايلاندية. لحل هذه المشكلة بشكل أعم، [SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing (Kudo et al.، 2018)](https://huggingface.co/papers/1808.06226) يتعامل مع المدخلات كتدفق بيانات خام، وبالتالي يشمل المسافة في مجموعة الأحرف التي سيتم استخدامها. ثم يستخدم خوارزمية BPE أو unigram لبناء المفردات المناسبة. يستخدم [`XLNetTokenizer`] SentencePiece على سبيل المثال، وهو أيضًا سبب تضمين تم تضمين حرف `"▁"` في المفردات. عملية فك التشفير باستخدام SentencePiece سهلة للغاية نظرًا لأنه يمكن دائمًا دمج الرموز معًا واستبدال `"▁"` بمسافة. تستخدم جميع نماذج المحولات في المكتبة التي تستخدم SentencePiece بالاقتران مع unigram. أمثلة على النماذج باستخدام SentencePiece هي [ALBERT](model_doc/albert)، [XLNet](model_doc/xlnet)، [Marian](model_doc/marian)، و [T5](model_doc/t5).
transformers/docs/source/ar/tokenizer_summary.md/0
{ "file_path": "transformers/docs/source/ar/tokenizer_summary.md", "repo_id": "transformers", "token_count": 13439 }
360
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Adapter mit 🤗 PEFT laden [[open-in-colab]] Die [Parameter-Efficient Fine Tuning (PEFT)](https://huggingface.co/blog/peft) Methoden frieren die vorab trainierten Modellparameter während der Feinabstimmung ein und fügen eine kleine Anzahl trainierbarer Parameter (die Adapter) hinzu. Die Adapter werden trainiert, um aufgabenspezifische Informationen zu lernen. Es hat sich gezeigt, dass dieser Ansatz sehr speichereffizient ist und weniger Rechenleistung beansprucht, während die Ergebnisse mit denen eines vollständig feinabgestimmten Modells vergleichbar sind. Adapter, die mit PEFT trainiert wurden, sind in der Regel um eine Größenordnung kleiner als das vollständige Modell, so dass sie bequem gemeinsam genutzt, gespeichert und geladen werden können. <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/> <figcaption class="text-center">Die Adaptergewichte für ein OPTForCausalLM-Modell, die auf dem Hub gespeichert sind, sind nur ~6MB groß, verglichen mit der vollen Größe der Modellgewichte, die ~700MB betragen können.</figcaption> </div> Wenn Sie mehr über die 🤗 PEFT-Bibliothek erfahren möchten, sehen Sie sich die [Dokumentation](https://huggingface.co/docs/peft/index) an. ## Setup Starten Sie mit der Installation von 🤗 PEFT: ```bash pip install peft ``` Wenn Sie die brandneuen Funktionen ausprobieren möchten, sollten Sie die Bibliothek aus dem Quellcode installieren: ```bash pip install git+https://github.com/huggingface/peft.git ``` ## Unterstützte PEFT-Modelle Transformers unterstützt nativ einige PEFT-Methoden, d.h. Sie können lokal oder auf dem Hub gespeicherte Adaptergewichte laden und sie mit wenigen Zeilen Code einfach ausführen oder trainieren. Die folgenden Methoden werden unterstützt: - [Low Rank Adapters](https://huggingface.co/docs/peft/conceptual_guides/lora) - [IA3](https://huggingface.co/docs/peft/conceptual_guides/ia3) - [AdaLoRA](https://huggingface.co/papers/2303.10512) Wenn Sie andere PEFT-Methoden, wie z.B. Prompt Learning oder Prompt Tuning, verwenden möchten, oder über die 🤗 PEFT-Bibliothek im Allgemeinen, lesen Sie bitte die [Dokumentation](https://huggingface.co/docs/peft/index). ## Laden Sie einen PEFT-Adapter Um ein PEFT-Adaptermodell von 🤗 Transformers zu laden und zu verwenden, stellen Sie sicher, dass das Hub-Repository oder das lokale Verzeichnis eine `adapter_config.json`-Datei und die Adaptergewichte enthält, wie im obigen Beispielbild gezeigt. Dann können Sie das PEFT-Adaptermodell mit der Klasse `AutoModelFor` laden. Um zum Beispiel ein PEFT-Adaptermodell für die kausale Sprachmodellierung zu laden: 1. Geben Sie die PEFT-Modell-ID an. 2. übergeben Sie es an die Klasse [`AutoModelForCausalLM`]. ```py from transformers import AutoModelForCausalLM, AutoTokenizer peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(peft_model_id) ``` <Tip> Sie können einen PEFT-Adapter entweder mit einer `AutoModelFor`-Klasse oder der Basismodellklasse wie `OPTForCausalLM` oder `LlamaForCausalLM` laden. </Tip> Sie können einen PEFT-Adapter auch laden, indem Sie die Methode `load_adapter` aufrufen: ```py from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "facebook/opt-350m" peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(model_id) model.load_adapter(peft_model_id) ``` ## Laden in 8bit oder 4bit Die `bitsandbytes`-Integration unterstützt Datentypen mit 8bit und 4bit Genauigkeit, was für das Laden großer Modelle nützlich ist, weil es Speicher spart (lesen Sie den `bitsandbytes`-Integrations [guide](./quantization#bitsandbytes-integration), um mehr zu erfahren). Fügen Sie die Parameter `load_in_8bit` oder `load_in_4bit` zu [`~PreTrainedModel.from_pretrained`] hinzu und setzen Sie `device_map="auto"`, um das Modell effektiv auf Ihre Hardware zu verteilen: ```py from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(peft_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) ``` ## Einen neuen Adapter hinzufügen Sie können [`~peft.PeftModel.add_adapter`] verwenden, um einen neuen Adapter zu einem Modell mit einem bestehenden Adapter hinzuzufügen, solange der neue Adapter vom gleichen Typ ist wie der aktuelle Adapter. Wenn Sie zum Beispiel einen bestehenden LoRA-Adapter an ein Modell angehängt haben: ```py from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer from peft import PeftConfig model_id = "facebook/opt-350m" model = AutoModelForCausalLM.from_pretrained(model_id) lora_config = LoraConfig( target_modules=["q_proj", "k_proj"], init_lora_weights=False ) model.add_adapter(lora_config, adapter_name="adapter_1") ``` Um einen neuen Adapter hinzuzufügen: ```py # attach new adapter with same config model.add_adapter(lora_config, adapter_name="adapter_2") ``` Jetzt können Sie mit [`~peft.PeftModel.set_adapter`] festlegen, welcher Adapter verwendet werden soll: ```py # use adapter_1 model.set_adapter("adapter_1") output = model.generate(**inputs) print(tokenizer.decode(output_disabled[0], skip_special_tokens=True)) # use adapter_2 model.set_adapter("adapter_2") output_enabled = model.generate(**inputs) print(tokenizer.decode(output_enabled[0], skip_special_tokens=True)) ``` ## Aktivieren und Deaktivieren von Adaptern Sobald Sie einen Adapter zu einem Modell hinzugefügt haben, können Sie das Adaptermodul aktivieren oder deaktivieren. So aktivieren Sie das Adaptermodul: ```py from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer from peft import PeftConfig model_id = "facebook/opt-350m" adapter_model_id = "ybelkada/opt-350m-lora" tokenizer = AutoTokenizer.from_pretrained(model_id) text = "Hello" inputs = tokenizer(text, return_tensors="pt") model = AutoModelForCausalLM.from_pretrained(model_id) peft_config = PeftConfig.from_pretrained(adapter_model_id) # to initiate with random weights peft_config.init_lora_weights = False model.add_adapter(peft_config) model.enable_adapters() output = model.generate(**inputs) ``` So deaktivieren Sie das Adaptermodul: ```py model.disable_adapters() output = model.generate(**inputs) ``` ## PEFT-Adapter trainieren PEFT-Adapter werden von der Klasse [`Trainer`] unterstützt, so dass Sie einen Adapter für Ihren speziellen Anwendungsfall trainieren können. Dazu müssen Sie nur ein paar weitere Codezeilen hinzufügen. Zum Beispiel, um einen LoRA-Adapter zu trainieren: <Tip> Wenn Sie mit der Feinabstimmung eines Modells mit [`Trainer`] noch nicht vertraut sind, werfen Sie einen Blick auf das Tutorial [Feinabstimmung eines vortrainierten Modells](Training). </Tip> 1. Definieren Sie Ihre Adapterkonfiguration mit dem Aufgabentyp und den Hyperparametern (siehe [`~peft.LoraConfig`] für weitere Details darüber, was die Hyperparameter tun). ```py from peft import LoraConfig peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", task_type="CAUSAL_LM", ) ``` 2. Fügen Sie dem Modell einen Adapter hinzu. ```py model.add_adapter(peft_config) ``` 3. Jetzt können Sie das Modell an [`Trainer`] übergeben! ```py trainer = Trainer(model=model, ...) trainer.train() ``` So speichern Sie Ihren trainierten Adapter und laden ihn wieder: ```py model.save_pretrained(save_dir) model = AutoModelForCausalLM.from_pretrained(save_dir) ``` <!-- TODO: (@younesbelkada @stevhliu) - Link to PEFT docs for further details - Trainer - 8-bit / 4-bit examples ? -->
transformers/docs/source/de/peft.md/0
{ "file_path": "transformers/docs/source/de/peft.md", "repo_id": "transformers", "token_count": 3186 }
361
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Attention Interface This page describes how to use the `AttentionInterface` in order to register custom attention functions to use with supported models. ## Customizing attention function Most recent models can now switch from one attention function used in the Attention layer to the other, thanks to a simple mapping. By default, we provide the implementation for [`sdpa`](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html), [`flash_attention_2`](https://github.com/Dao-AILab/flash-attention) and [`flex_attention`](https://pytorch.org/docs/stable/nn.attention.flex_attention.html#module-torch.nn.attention.flex_attention) as well as `eager`, which is a simple matrix multiplication without any optimization on top. This is the setting you can usually choose when instantiating a model: ```python from transformers import AutoModelForCausalLM model_id = "meta-llama/Llama-3.2-1B" # Here, using flash attention as an example model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation="flash_attention_2") ``` But what if you wanted to create your own attention function? Or simply play around with existing ones, adding a few statements here and there? You can now do so with the `AttentionInterface`! Here is an example: ```python from transformers import AutoModelForCausalLM, AttentionInterface from transformers.integrations.sdpa_attention import sdpa_attention_forward import torch model_id = "meta-llama/Llama-3.2-1B" def my_new_sdpa(*args, **kwargs): print("I just entered the attention computation") return sdpa_attention_forward(*args, **kwargs) AttentionInterface.register("my_new_sdpa", my_new_sdpa) model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation="my_new_sdpa") # Try running the forward with the new attention function model(torch.ones(1, 5, dtype=int)) ``` You will see it prints "I just entered the attention computation" as many times as there are layers in the model (with this example, 16 times). ## Dynamically switching attention function You could dynamically change the model's attention function as well: ```python # Back to use original sdpa implementation model.set_attn_implementation("sdpa") model(torch.ones(1, 5, dtype=int)) ``` and it will stop printing the statements, as it now uses the `sdpa` attention. This allows to quickly change an attention function, without needing to reload the model! ## Different attention per backbone in multimodal models For multimodal models different attention functions may work better for each backbone module. For example, some vision backbones perform better in fp32, but are incompatible with FlashAttention. To continue using FlashAttention while keeping the vision encoder in fp32, create a dict and map each config to an attention implementation as shown below. ```python from transformers import AutoModelForImageTextToText model_id = "facebook/chameleon-7b" attention_implementation_per_backbone = {"vision_config": "sdpa", "text_config": "flash_attention_2"} model = AutoModelForImageTextToText.from_pretrained(model_id, attn_implementation=attention_implementation_per_backbone) # NOTE: keys in the attention implementation have to be the same as the sub-config names for key in attention_implementation_per_backbone: assert key in model.config.sub_configs, f"Invalid key in `attention_implementation`" # You can omit certain backbones - the default attention function (SDPA) will be used # This is equivalent to the previous example model = AutoModelForImageTextToText.from_pretrained(model_id, attn_implementation={"text_config": "flash_attention_2"}) # Set the same attention implementation for all backbones with single string, same as in non-multimodal models model = AutoModelForImageTextToText.from_pretrained(model_id, attn_implementation="eager") # Alternatively use a dict with an empty key for global configuration model = AutoModelForImageTextToText.from_pretrained(model_id, attn_implementation={"": "eager"}) ``` ## What about new args needed in my custom attention function? But indeed, what if the new function requires a new arg to be properly used? It's no issue! Models supporting the `AttentionInterface` propagate kwargs all the way to the Attention layers, and to the used attention function. That way, you can simply pass the arg (as a kwargs, i.e. you need to qualify the name of the arg) in the model's forward, and it will be correctly used in the attention. However, custom attention functions have some limitations. In particular, it must follow the signature and return format of other attention functions, i.e. ```python from transformers import AutoModelForCausalLM, AttentionInterface from transformers.integrations.sdpa_attention import sdpa_attention_forward import torch def custom_attention( module: torch.nn.Module, # required arg query: torch.Tensor, # required arg key: torch.Tensor, # required arg value: torch.Tensor, # required arg attention_mask: Optional[torch.Tensor], # required arg a_new_kwargs = None, # You can now add as many kwargs as you need another_new_kwargs = None, # You can now add as many kwargs as you need **kwargs, # You need to accept **kwargs as models will pass other args ) -> tuple[torch.Tensor, Optional[torch.Tensor]] ... # do your magic! return attn_output, attn_weights # attn_weights are optional here AttentionInterface.register("custom", custom_attention) model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation="custom") # Forward pass with the new kwargs model(torch.ones(1, 5, dtype=int), a_new_kwargs=..., another_new_kwargs=...) ``` If in doubt about what args/kwargs a given model sends to the attention function, simply check that model's modeling code on [GitHub](https://github.com/huggingface/transformers/tree/main/src/transformers/models)! ## Accessing current available implementations Most of the time, you will simply need to `register` a new function. If, however, you need to access an existing one, and/or perform a few checks, the preferred way is to use the global `ALL_ATTENTION_FUNCTIONS`. It behaves the same way you would expect from a usual Python dictionary: ```python >>> from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS >>> list(ALL_ATTENTION_FUNCTIONS.keys()) >>> ['flash_attention_2', 'flex_attention', 'sdpa'] >>> ALL_ATTENTION_FUNCTIONS["sdpa"] >>> <function transformers.integrations.sdpa_attention.sdpa_attention_forward> >>> ALL_ATTENTION_FUNCTIONS.get("sdpa", None) >>> <function transformers.integrations.sdpa_attention.sdpa_attention_forward> # You can also globally `register` a new function directly on it >>> ALL_ATTENTION_FUNCTIONS.register("new_func", new_func) ``` ## Attention Mask Interface Having a new attention function may mean that you need a new format of attention mask to decide what key and value tokens the query tokens should attend to. This is now possible with the `AttentionMaskInterface`! It works in the same way as the `AttentionInterface`: ```python from transformers import AttentionMaskInterface from transformers.masking_utils import sdpa_mask import torch def my_new_sdpa_mask(*args, **kwargs): print("I just entered the attention mask computation") return sdpa_mask(*args, **kwargs) AttentionMaskInterface.register("my_new_sdpa_mask", my_new_sdpa_mask) ``` The reason you have to register it is because we need to automatically correct your mask format based on the attention implementation (for example, flex attention uses a BlockMask format, while sdpa uses a 4D tensor). By default, if you do not register an attention mask function along with your attention function, mask creation will be skipped and `attention_mask=None` will be passed along to the Attention layers. The default signature of the attention mask functions is the following: ```python def custom_attention_mask( batch_size: int, # required arg cache_position: torch.Tensor, # required arg kv_length: int, # required arg kv_offset: int = 0, # required arg mask_function: Callable = causal_mask_function, # required arg attention_mask: Optional[torch.Tensor] = None, # required arg **kwargs, # a few additional args may be passed as kwargs, especially the model's config is always passed ) -> Optional[torch.Tensor]: ``` It mostly works thanks to the `mask_function`, which is a `Callable` in the form of [torch's mask_mod functions](https://pytorch.org/blog/flexattention/), taking 4 indices as input and returning a boolean to indicate if this position should take part in the attention computation. If you cannot use the `mask_function` to create your mask for some reason, you can try to work around it by doing something similar to our [torch export workaround](https://github.com/huggingface/transformers/blob/main/src/transformers/integrations/executorch.py).
transformers/docs/source/en/attention_interface.md/0
{ "file_path": "transformers/docs/source/en/attention_interface.md", "repo_id": "transformers", "token_count": 2707 }
362
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tokenizers Tokenizers convert text into an array of numbers known as tensors, the inputs to a text model. There are several tokenizer algorithms, but they all share the same purpose. Split text into smaller words or subwords (tokens) according to some rules, and convert them into numbers (input ids). A Transformers tokenizer also returns an attention mask to indicate which tokens should be attended to. > [!TIP] > Learn about the most popular tokenization algorithms on the [Summary of the tokenizers](./tokenizer_summary) doc. Call [`~PreTrainedTokenizer.from_pretrained`] to load a tokenizer and its configuration from the Hugging Face [Hub](https://hf.co) or a local directory. The pretrained tokenizer is saved in a [tokenizer.model](https://huggingface.co/google/gemma-2-2b/blob/main/tokenizer.model) file with all its associated vocabulary files. Pass a string of text to the tokenizer to return the input ids and attention mask, and set the framework tensor type to return with the `return_tensors` parameter. ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b") tokenizer("We are very happy to show you the 🤗 Transformers library", return_tensors="pt") {'input_ids': tensor([[ 2, 1734, 708, 1508, 4915, 577, 1500, 692, 573, 156808, 128149, 9581, 235265]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) } ``` Whichever tokenizer you use, make sure the tokenizer vocabulary is the same as the pretrained models tokenizer vocabulary. This is especially important if you're using a custom tokenizer with a different vocabulary from the pretrained models tokenizer. This guide provides a brief overview of the tokenizer classes and how to preprocess text with it. ## Tokenizer classes All tokenizers inherit from a [`PreTrainedTokenizerBase`] class that provides common methods for all tokenizers like [`~PreTrainedTokenizerBase.from_pretrained`] and [`~PreTrainedTokenizerBase.batch_decode`]. There are two main tokenizer classes that build on top of the base class. - [`PreTrainedTokenizer`] is a Python implementation, for example [`LlamaTokenizer`]. - [`PreTrainedTokenizerFast`] is a fast Rust-based implementation from the [Tokenizers](https://hf.co/docs/tokenizers/index) library, for example [`LlamaTokenizerFast`]. There are two ways you can load a tokenizer, with [`AutoTokenizer`] or a model-specific tokenizer. <hfoptions id="tokenizer-classes"> <hfoption id="AutoTokenizer"> The [AutoClass](./model_doc/auto) API is a fast and easy way to load a tokenizer without needing to know whether a Python or Rust-based implementation is available. By default, [`AutoTokenizer`] tries to load a fast tokenizer if it's available, otherwise, it loads the Python implementation. Use [`~PreTrainedTokenizer.from_pretrained`] to load a tokenizer. ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b") tokenizer("We are very happy to show you the 🤗 Transformers library.", return_tensors="pt") {'input_ids': tensor([[ 2, 1734, 708, 1508, 4915, 577, 1500, 692, 573, 156808, 128149, 9581, 235265]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) } ``` Load your own tokenizer by passing its vocabulary file to [`~AutoTokenizer.from_pretrained`]. ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("./model_directory/my_vocab_file.txt") ``` </hfoption> <hfoption id="model-specific tokenizer"> Each pretrained model is associated with a tokenizer and the specific vocabulary it was trained on. A tokenizer can be loaded directly from the model-specific class. > [!TIP] > Refer to a models API documentation to check whether a fast tokenizer is supported. ```py from transformers import GemmaTokenizer tokenizer = GemmaTokenizer.from_pretrained("google/gemma-2-2b") tokenizer("We are very happy to show you the 🤗 Transformers library.", return_tensors="pt") ``` To load a fast tokenizer, use the fast implementation class. ```py from transformers import GemmaTokenizerFast tokenizer = GemmaTokenizerFast.from_pretrained("google/gemma-2-2b") tokenizer("We are very happy to show you the 🤗 Transformers library.", return_tensors="pt") ``` Load your own tokenizer by passing its vocabulary file to the `vocab_file` parameter. ```py from transformers import GemmaTokenizerFast tokenizer = GemmaTokenizerFast(vocab_file="my_vocab_file.txt") ``` </hfoption> </hfoptions> ## Multimodal tokenizers In addition to text tokens, multimodal tokenizers also holds tokens from other modalities as a part of its attributes for easy access. To add these special tokens to a tokenizer, pass them as a dictionary to the `extra_special_tokens` parameter in [`~AutoTokenizer.from_pretrained`]. The example below adds the `image_token` to a vision-language model. Save the tokenizer so you can reuse it with direct access to the `image_token`, `boi_token`, and `eoi_token`. ```py vision_tokenizer = AutoTokenizer.from_pretrained( "llava-hf/llava-1.5-7b-hf", extra_special_tokens={"image_token": "<image>", "boi_token": "<image_start>", "eoi_token": "<image_end>"} ) print(vision_tokenizer.image_token, vision_tokenizer.image_token_id) ("<image>", 32000) vision_tokenizer.save_pretrained("./path/to/tokenizer") ``` ## Fast tokenizers <Youtube id="3umI3tm27Vw"/> [`PreTrainedTokenizerFast`] or *fast tokenizers* are Rust-based tokenizers from the [Tokenizers](https://hf.co/docs/tokenizers) library. It is significantly faster at batched tokenization and provides additional alignment methods compared to the Python-based tokenizers. [`AutoTokenizer`] automatically loads a fast tokenizer if it's supported. Otherwise, you need to explicitly load the fast tokenizer. This section will show you how to train a fast tokenizer and reuse it in Transformers. To train a Byte-Pair Encoding (BPE) tokenizer, create a [`~tokenizers.Tokenizer`] and [`~tokenizers.trainers.BpeTrainer`] class and define the unknown token and special tokens. ```py from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.trainers import BpeTrainer tokenizer = Tokenizer(BPE(unk_token="[UNK]")) trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) ``` Split the tokens on [`~tokenizers.pre_tokenizers.Whitespace`] to create tokens that don't overlap with each other. ```py from tokenizers.pre_tokenizers import Whitespace tokenizer.pre_tokenizer = Whitespace() ``` Call [`~tokenizers.Tokenizer.train`] on the text files and trainer to start training. ```py files = [...] tokenizer.train(files, trainer) ``` Use [`~tokenizers.Tokenizer.save`] to save the tokenizers configuration and vocabulary to a JSON file. ```py tokenizer.save("tokenizer.json") ``` Now you can load and reuse the tokenizer object in Transformers by passing it to the `tokenizer_object` parameter in [`PreTrainedTokenizerFast`]. ```py from transformers import PreTrainedTokenizerFast fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) ``` To load a saved tokenizer from its JSON file, pass the file path to the `tokenizer_file` parameter in [`PreTrainedTokenizerFast`]. ```py from transformers import PreTrainedTokenizerFast fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` ## tiktoken [tiktoken](https://github.com/openai/tiktoken) is a [byte-pair encoding (BPE)](./tokenizer_summary#byte-pair-encoding-bpe) tokenizer by OpenAI. It includes several tokenization schemes or encodings for how text should be tokenized. There are currently two models trained and released with tiktoken, GPT2 and Llama3. Transformers supports models with a [tokenizer.model](https://hf.co/meta-llama/Meta-Llama-3-8B/blob/main/original/tokenizer.model) tiktoken file. The tiktoken file is automatically converted into Transformers Rust-based [`PreTrainedTokenizerFast`]. Add the `subfolder` parameter to [`~PreTrainedModel.from_pretrained`] to specify where the `tokenizer.model` tiktoken file is located. ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", subfolder="original") ``` ### Create a tiktoken tokenizer The tiktoken `tokenizer.model` file contains no information about additional tokens or pattern strings. If these are important, convert the tokenizer to `tokenizer.json` (the appropriate format for [`PreTrainedTokenizerFast`]). Generate the tiktoken `tokenizer.model` file with the [tiktoken.get_encoding](https://github.com/openai/tiktoken/blob/63527649963def8c759b0f91f2eb69a40934e468/tiktoken/registry.py#L63) function, and convert it to `tokenizer.json` with [convert_tiktoken_to_fast](https://github.com/huggingface/transformers/blob/99e0ab6ed888136ea4877c6d8ab03690a1478363/src/transformers/integrations/tiktoken.py#L8). ```py from transformers.integrations.tiktoken import convert_tiktoken_to_fast from tiktoken import get_encoding # Load your custom encoding or the one provided by OpenAI encoding = get_encoding("gpt2") convert_tiktoken_to_fast(encoding, "config/save/dir") ``` The resulting `tokenizer.json` file is saved to the specified directory and loaded with [`~PreTrainedTokenizerFast.from_pretrained`]. ```py tokenizer = PreTrainedTokenizerFast.from_pretrained("config/save/dir") ``` ## Preprocess <Youtube id="Yffk5aydLzg"/> A Transformers model expects the input to be a PyTorch, TensorFlow, or NumPy tensor. A tokenizers job is to preprocess text into those tensors. Specify the framework tensor type to return with the `return_tensors` parameter. ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b") tokenizer("We are very happy to show you the 🤗 Transformers library.", return_tensors="pt") {'input_ids': tensor([[ 2, 1734, 708, 1508, 4915, 577, 1500, 692, 573, 156808, 128149, 9581, 235265]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) } ``` The tokenization process of converting text into input ids is completed in two steps. <hfoptions id="steps"> <hfoption id="1. tokenize"> In the first step, a string of text is split into tokens by the [`~PreTrainedTokenizer.tokenize`] function. How the text is split depends on the tokenization algorithm. ```py tokens = tokenizer.tokenize("We are very happy to show you the 🤗 Transformers library") print(tokens) ['We', '▁are', '▁very', '▁happy', '▁to', '▁show', '▁you', '▁the', '▁🤗', '▁Transformers', '▁library'] ``` Gemma uses a [SentencePiece](./tokenizer_summary#sentencepiece) tokenizer which replaces spaces with an underscore `_`. </hfoption> <hfoption id="2. convert tokens to ids"> In the second step, the tokens are converted into ids with [`~PreTrainedTokenizer.convert_tokens_to_ids`]. ```py ids = tokenizer.convert_tokens_to_ids(tokens) print(ids) [1734, 708, 1508, 4915, 577, 1500, 692, 573, 156808, 128149, 9581] ``` </hfoption> <hfoption id="3. decode ids to text"> Lastly, the model prediction typically generates numerical outputs which are converted back to text with [`~PreTrainedTokenizer.decode`]. ```py decoded_string = tokenizer.decode(ids) print(decoded_string) 'We are very happy to show you the 🤗 Transformers library' ``` </hfoption> </hfoptions> > [!TIP] > Visualize how different tokenizers work in the [Tokenizer Playground](https://xenova-the-tokenizer-playground.static.hf.space). ### Special tokens Special tokens provide the model with some additional information about the text. For example, if you compare the tokens obtained from passing text directly to the tokenizer and from [`~PreTrainedTokenizer.convert_tokens_to_ids`], you'll notice some additional tokens are added. ```py model_inputs = tokenizer("We are very happy to show you the 🤗 Transformers library.") [2, 1734, 708, 1508, 4915, 577, 1500, 692, 573, 156808, 128149, 9581] tokenizer.convert_tokens_to_ids(tokens) [1734, 708, 1508, 4915, 577, 1500, 692, 573, 156808, 128149, 9581] ``` When you [`~PreTrainedTokenizer.decode`] the ids, you'll see `<bos>` at the beginning of the string. This is used to indicate the beginning of a sentence to the model. ```py print(tokenizer.decode(model_inputs["input_ids"])) print(tokenizer.decode(ids)) '<bos>We are very happy to show you the 🤗 Transformers library.' 'We are very happy to show you the 🤗 Transformers library' ``` Not all models need special tokens, but if they do, a tokenizer automatically adds them. ### Batch tokenization It is faster and more efficient to preprocess *batches* of text instead of a single sentence at a time. Fast tokenizers are especially good at parallelizing tokenization. Pass a list of string text to the tokenizer. ```py batch_sentences = [ "But what about second breakfast?", "Don't think he knows about second breakfast, Pip.", "What about elevensies?", ] encoded_inputs = tokenizer(batch_sentences, return_tensors="pt") print(encoded_inputs) { 'input_ids': [[2, 1860, 1212, 1105, 2257, 14457, 235336], [2, 4454, 235303, 235251, 1742, 693, 9242, 1105, 2257, 14457, 235269, 48782, 235265], [2, 1841, 1105, 29754, 37453, 235336]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]] } ``` ### Padding > [!TIP] > Learn about additional padding strategies in the [Padding and truncation](./pad_truncation) guide. In the output above, the `input_ids` have different lengths. This is an issue because Transformers expects them to have the same lengths so it can pack them into a batch. Sequences with uneven lengths can't be batched. Padding adds a special *padding token* to ensure all sequences have the same length. Set `padding=True` to pad the sequences to the longest sequence length in the batch. ```py encoded_inputs = tokenizer(batch_sentences, padding=True, return_tensors="pt") print(encoded_inputs) ``` The tokenizer added the special padding token `0` to the left side (*left padding*) because Gemma and LLMs in general are not trained to continue generation from a padding token. ### Truncation > [!TIP] > Learn about additional truncation strategies in the [Padding and truncation](./pad_truncation) guide. Models are only able to process sequences up to a certain length. If you try to process a sequence longer than a model can handle, it crashes. Truncation removes tokens from a sequence to ensure it doesn't exceed the maximum length. Set `truncation=True` to truncate a sequence to the maximum length accepted by the model. You can also set the maximum length yourself with the `max_length` parameter. ```py encoded_inputs = tokenizer(batch_sentences, max_length=8, truncation=True, return_tensors="pt") print(encoded_inputs) ```
transformers/docs/source/en/fast_tokenizers.md/0
{ "file_path": "transformers/docs/source/en/fast_tokenizers.md", "repo_id": "transformers", "token_count": 4994 }
363
<!--Copyright 2025 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Import Utilities This page goes through the transformers utilities to enable lazy and fast object import. While we strive for minimal dependencies, some models have specific dependencies requirements that cannot be worked around. We don't want for all users of `transformers` to have to install those dependencies to use other models, we therefore mark those as soft dependencies rather than hard dependencies. The transformers toolkit is not made to error-out on import of a model that has a specific dependency; instead, an object for which you are lacking a dependency will error-out when calling any method on it. As an example, if `torchvision` isn't installed, the fast image processors will not be available. This object is still importable: ```python >>> from transformers import DetrImageProcessorFast >>> print(DetrImageProcessorFast) <class 'DetrImageProcessorFast'> ``` However, no method can be called on that object: ```python >>> DetrImageProcessorFast.from_pretrained() ImportError: DetrImageProcessorFast requires the Torchvision library but it was not found in your environment. Check out the instructions on the installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. Please note that you may need to restart your runtime after installation. ``` Let's see how to specify specific object dependencies. ## Specifying Object Dependencies ### Filename-based All objects under a given filename have an automatic dependency to the tool linked to the filename **TensorFlow**: All files starting with `modeling_tf_` have an automatic TensorFlow dependency. **Flax**: All files starting with `modeling_flax_` have an automatic Flax dependency **PyTorch**: All files starting with `modeling_` and not valid with the above (TensorFlow and Flax) have an automatic PyTorch dependency **Tokenizers**: All files starting with `tokenization_` and ending with `_fast` have an automatic `tokenizers` dependency **Vision**: All files starting with `image_processing_` have an automatic dependency to the `vision` dependency group; at the time of writing, this only contains the `pillow` dependency. **Vision + Torch + Torchvision**: All files starting with `image_processing_` and ending with `_fast` have an automatic dependency to `vision`, `torch`, and `torchvision`. All of these automatic dependencies are added on top of the explicit dependencies that are detailed below. ### Explicit Object Dependencies We add a method called `requires` that is used to explicitly specify the dependencies of a given object. As an example, the `Trainer` class has two hard dependencies: `torch` and `accelerate`. Here is how we specify these required dependencies: ```python from .utils.import_utils import requires @requires(backends=("torch", "accelerate")) class Trainer: ... ``` Backends that can be added here are all the backends that are available in the `import_utils.py` module. Additionally, specific versions can be specified in each backend. For example, this is how you would specify a requirement on torch>=2.6 on the `Trainer` class: ```python from .utils.import_utils import requires @requires(backends=("torch>=2.6", "accelerate")) class Trainer: ... ``` You can specify the following operators: `==`, `>`, `>=`, `<`, `<=`, `!=`. ## Methods [[autodoc]] utils.import_utils.define_import_structure [[autodoc]] utils.import_utils.requires
transformers/docs/source/en/internal/import_utils.md/0
{ "file_path": "transformers/docs/source/en/internal/import_utils.md", "repo_id": "transformers", "token_count": 1079 }
364
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeepSpeed [DeepSpeed](https://github.com/deepspeedai/DeepSpeed), powered by Zero Redundancy Optimizer (ZeRO), is an optimization library for training and fitting very large models onto a GPU. It is available in several ZeRO stages, where each stage progressively saves more GPU memory by partitioning the optimizer state, gradients, parameters, and enabling offloading to a CPU or NVMe. DeepSpeed is integrated with the [`Trainer`] class and most of the setup is automatically taken care of for you. However, if you want to use DeepSpeed without the [`Trainer`], Transformers provides a [`HfDeepSpeedConfig`] class. <Tip> Learn more about using DeepSpeed with [`Trainer`] in the [DeepSpeed](../deepspeed) guide. </Tip> ## HfDeepSpeedConfig [[autodoc]] integrations.HfDeepSpeedConfig - all
transformers/docs/source/en/main_classes/deepspeed.md/0
{ "file_path": "transformers/docs/source/en/main_classes/deepspeed.md", "repo_id": "transformers", "token_count": 402 }
365