text
stringlengths
7
1.24M
id
stringlengths
14
166
metadata
dict
__index_level_0__
int64
0
519
""" Sequencer Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf """ # Copyright (c) 2022. Yuki Tatsunami # Licensed under the Apache License, Version 2.0 (the "License"); import math from functools import partial from itertools import accumulate from typing import Optional, Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply from ._registry import register_model, generate_default_cfgs __all__ = ['Sequencer2d'] # model_registry will add each entrypoint fn to this def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: if flax: # Flax defaults lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)): stdv = 1.0 / math.sqrt(module.hidden_size) for weight in module.parameters(): nn.init.uniform_(weight, -stdv, stdv) elif hasattr(module, 'init_weights'): module.init_weights() class RNNIdentity(nn.Module): def __init__(self, *args, **kwargs): super(RNNIdentity, self).__init__() def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]: return x, None class RNN2dBase(nn.Module): def __init__( self, input_size: int, hidden_size: int, num_layers: 
int = 1, bias: bool = True, bidirectional: bool = True, union="cat", with_fc=True, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = 2 * hidden_size if bidirectional else hidden_size self.union = union self.with_vertical = True self.with_horizontal = True self.with_fc = with_fc self.fc = None if with_fc: if union == "cat": self.fc = nn.Linear(2 * self.output_size, input_size) elif union == "add": self.fc = nn.Linear(self.output_size, input_size) elif union == "vertical": self.fc = nn.Linear(self.output_size, input_size) self.with_horizontal = False elif union == "horizontal": self.fc = nn.Linear(self.output_size, input_size) self.with_vertical = False else: raise ValueError("Unrecognized union: " + union) elif union == "cat": pass if 2 * self.output_size != input_size: raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.") elif union == "add": pass if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") elif union == "vertical": if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") self.with_horizontal = False elif union == "horizontal": if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") self.with_vertical = False else: raise ValueError("Unrecognized union: " + union) self.rnn_v = RNNIdentity() self.rnn_h = RNNIdentity() def forward(self, x): B, H, W, C = x.shape if self.with_vertical: v = x.permute(0, 2, 1, 3) v = v.reshape(-1, H, C) v, _ = self.rnn_v(v) v = v.reshape(B, W, H, -1) v = v.permute(0, 2, 1, 3) else: v = None if self.with_horizontal: h = x.reshape(-1, W, C) h, _ = self.rnn_h(h) h = h.reshape(B, H, W, -1) else: h = None if v is not None and h is not None: if self.union == "cat": x 
= torch.cat([v, h], dim=-1) else: x = v + h elif v is not None: x = v elif h is not None: x = h if self.fc is not None: x = self.fc(x) return x class LSTM2d(RNN2dBase): def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, bidirectional: bool = True, union="cat", with_fc=True, ): super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) if self.with_vertical: self.rnn_v = nn.LSTM( input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, ) if self.with_horizontal: self.rnn_h = nn.LSTM( input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, ) class Sequencer2dBlock(nn.Module): def __init__( self, dim, hidden_size, mlp_ratio=3.0, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0., ): super().__init__() channels_dim = int(mlp_ratio * dim) self.norm1 = norm_layer(dim) self.rnn_tokens = rnn_layer( dim, hidden_size, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Shuffle(nn.Module): def __init__(self): super().__init__() def forward(self, x): if self.training: B, H, W, C = x.shape r = torch.randperm(H * W) x = x.reshape(B, -1, C) x = x[:, r, :].reshape(B, H, W, -1) return x class Downsample2d(nn.Module): def __init__(self, input_dim, output_dim, patch_size): super().__init__() self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.down(x) x = x.permute(0, 2, 3, 1) return x class Sequencer2dStage(nn.Module): def __init__( self, dim, dim_out, depth, patch_size, hidden_size, mlp_ratio, downsample=False, block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0., ): super().__init__() if downsample: self.downsample = Downsample2d(dim, dim_out, patch_size) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): blocks.append(block_layer( dim_out, hidden_size, mlp_ratio=mlp_ratio, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop, drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path, )) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class Sequencer2d(nn.Module): def __init__( self, num_classes=1000, img_size=224, in_chans=3, global_pool='avg', layers=(4, 3, 8, 3), patch_sizes=(7, 2, 2, 1), embed_dims=(192, 384, 384, 384), hidden_sizes=(48, 96, 96, 96), 
mlp_ratios=(3.0, 3.0, 3.0, 3.0), block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_rnn_layers=1, bidirectional=True, union="cat", with_fc=True, drop_rate=0., drop_path_rate=0., nlhb=False, stem_norm=False, ): super().__init__() assert global_pool in ('', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.head_hidden_size = embed_dims[-1] # for consistency with other models self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC) self.output_fmt = 'NHWC' self.feature_info = [] self.stem = PatchEmbed( img_size=None, patch_size=patch_sizes[0], in_chans=in_chans, embed_dim=embed_dims[0], norm_layer=norm_layer if stem_norm else None, flatten=False, output_fmt='NHWC', ) assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) reductions = list(accumulate(patch_sizes, lambda x, y: x * y)) stages = [] prev_dim = embed_dims[0] for i, _ in enumerate(embed_dims): stages += [Sequencer2dStage( prev_dim, embed_dims[i], depth=layers[i], downsample=i > 0, patch_size=patch_sizes[i], hidden_size=hidden_sizes[i], mlp_ratio=mlp_ratios[i], block_layer=block_layer, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_rnn_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop_rate, drop_path=drop_path_rate, )] prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.norm = norm_layer(embed_dims[-1]) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt, ) self.init_weights(nlhb=nlhb) def init_weights(self, nlhb=False): head_bias = -math.log(self.num_classes) if nlhb else 0. 
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)', None), (r'^norm', (99999,)) ] if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^stages\.(\d+)\.downsample', (0,)), (r'^norm', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'stages.0.blocks.0.norm1.weight' in state_dict: return state_dict # already translated checkpoint if 'model' in state_dict: state_dict = state_dict['model'] import re out_dict = {} for k, v in state_dict.items(): k = re.sub(r'blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k) k = re.sub(r'blocks.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_sequencer2d(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(3)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( Sequencer2d, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 
'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_l.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[4, 3, 8, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, ) model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[4, 3, 14, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, **kwargs) model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[8, 8, 16, 4], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, **kwargs) model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs)) return model
pytorch-image-models/timm/models/sequencer.py/0
{ "file_path": "pytorch-image-models/timm/models/sequencer.py", "repo_id": "pytorch-image-models", "token_count": 9247 }
232
""" Vision OutLOoker (VOLO) implementation Paper: `VOLO: Vision Outlooker for Visual Recognition` - https://arxiv.org/abs/2106.13112 Code adapted from official impl at https://github.com/sail-sg/volo, original copyright in comment below Modifications and additions for timm by / Copyright 2022, Ross Wightman """ # Copyright 2021 Sea Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, to_2tuple, to_ntuple, trunc_normal_, use_fused_attn from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._registry import register_model, generate_default_cfgs __all__ = ['VOLO'] # model_registry will add each entrypoint fn to this class OutlookAttention(nn.Module): def __init__( self, dim, num_heads, kernel_size=3, padding=1, stride=1, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() head_dim = dim // num_heads self.num_heads = num_heads self.kernel_size = kernel_size self.padding = padding self.stride = stride self.scale = head_dim ** -0.5 self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) 
self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride) self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True) def forward(self, x): B, H, W, C = x.shape v = self.v(x).permute(0, 3, 1, 2) # B, C, H, W h, w = math.ceil(H / self.stride), math.ceil(W / self.stride) v = self.unfold(v).reshape( B, self.num_heads, C // self.num_heads, self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) # B,H,N,kxk,C/H attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) attn = self.attn(attn).reshape( B, h * w, self.num_heads, self.kernel_size * self.kernel_size, self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4) # B,H,N,kxk,kxk attn = attn * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.kernel_size * self.kernel_size, h * w) x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride) x = self.proj(x.permute(0, 2, 3, 1)) x = self.proj_drop(x) return x class Outlooker(nn.Module): def __init__( self, dim, kernel_size, padding, stride=1, num_heads=1, mlp_ratio=3., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, qkv_bias=False, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = OutlookAttention( dim, num_heads, kernel_size=kernel_size, padding=padding, stride=stride, qkv_bias=qkv_bias, attn_drop=attn_drop, ) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.attn(self.norm1(x))) x = x + self.drop_path2(self.mlp(self.norm2(x))) return x class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, H, W, C = x.shape qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p if self.training else 0., ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, H, W, C) x = self.proj(x) x = self.proj_drop(x) return x class Transformer(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.attn(self.norm1(x))) x = x + self.drop_path2(self.mlp(self.norm2(x))) return x class ClassAttention(nn.Module): def __init__( self, dim, num_heads=8, head_dim=None, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads if head_dim is not None: self.head_dim = head_dim else: head_dim = dim // num_heads self.head_dim = head_dim self.scale = head_dim ** -0.5 self.kv = nn.Linear(dim, self.head_dim * self.num_heads * 2, bias=qkv_bias) self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(self.head_dim * self.num_heads, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim) * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, self.head_dim * self.num_heads) cls_embed = self.proj(cls_embed) cls_embed = self.proj_drop(cls_embed) return cls_embed class ClassBlock(nn.Module): def __init__( self, dim, num_heads, head_dim=None, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = ClassAttention( dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, ) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): cls_embed = x[:, :1] cls_embed = cls_embed + self.drop_path1(self.attn(self.norm1(x))) cls_embed = cls_embed + self.drop_path2(self.mlp(self.norm2(cls_embed))) return torch.cat([cls_embed, x[:, 1:]], dim=1) def get_block(block_type, **kargs): if block_type == 'ca': return ClassBlock(**kargs) def rand_bbox(size, lam, scale=1): """ get bounding box as token labeling (https://github.com/zihangJiang/TokenLabeling) return: bounding box """ W = size[1] // scale H = size[2] // scale cut_rat = np.sqrt(1. - lam) cut_w = (W * cut_rat).astype(int) cut_h = (H * cut_rat).astype(int) # uniform cx = np.random.randint(W) cy = np.random.randint(H) bbx1 = np.clip(cx - cut_w // 2, 0, W) bby1 = np.clip(cy - cut_h // 2, 0, H) bbx2 = np.clip(cx + cut_w // 2, 0, W) bby2 = np.clip(cy + cut_h // 2, 0, H) return bbx1, bby1, bbx2, bby2 class PatchEmbed(nn.Module): """ Image to Patch Embedding. Different with ViT use 1 conv layer, we use 4 conv layers to do patch embedding """ def __init__( self, img_size=224, stem_conv=False, stem_stride=1, patch_size=8, in_chans=3, hidden_dim=64, embed_dim=384, ): super().__init__() assert patch_size in [4, 8, 16] if stem_conv: self.conv = nn.Sequential( nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), ) else: self.conv = None self.proj = nn.Conv2d( hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride) self.num_patches = (img_size // patch_size) * (img_size // patch_size) def forward(self, x): if self.conv is not None: x = self.conv(x) x = self.proj(x) # B, C, H, W return x class 
Downsample(nn.Module): """ Image to Patch Embedding, downsampling between stage1 and stage2 """ def __init__(self, in_embed_dim, out_embed_dim, patch_size=2): super().__init__() self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.proj(x) # B, C, H, W x = x.permute(0, 2, 3, 1) return x def outlooker_blocks( block_fn, index, dim, layers, num_heads=1, kernel_size=3, padding=1, stride=2, mlp_ratio=3., qkv_bias=False, attn_drop=0, drop_path_rate=0., **kwargs, ): """ generate outlooker layer in stage1 return: outlooker layers """ blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) blocks.append(block_fn( dim, kernel_size=kernel_size, padding=padding, stride=stride, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr, )) blocks = nn.Sequential(*blocks) return blocks def transformer_blocks( block_fn, index, dim, layers, num_heads, mlp_ratio=3., qkv_bias=False, attn_drop=0, drop_path_rate=0., **kwargs, ): """ generate transformer layers in stage2 return: transformer layers """ blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) blocks.append(block_fn( dim, num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr, )) blocks = nn.Sequential(*blocks) return blocks class VOLO(nn.Module): """ Vision Outlooker, the main class of our model """ def __init__( self, layers, img_size=224, in_chans=3, num_classes=1000, global_pool='token', patch_size=8, stem_hidden_dim=64, embed_dims=None, num_heads=None, downsamples=(True, False, False, False), outlook_attention=(True, False, False, False), mlp_ratio=3.0, qkv_bias=False, drop_rate=0., pos_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, post_layers=('ca', 'ca'), 
use_aux_head=True, use_mix_token=False, pooling_scale=2, ): super().__init__() num_layers = len(layers) mlp_ratio = to_ntuple(num_layers)(mlp_ratio) img_size = to_2tuple(img_size) self.num_classes = num_classes self.global_pool = global_pool self.mix_token = use_mix_token self.pooling_scale = pooling_scale self.num_features = self.head_hidden_size = embed_dims[-1] if use_mix_token: # enable token mixing, see token labeling for details. self.beta = 1.0 assert global_pool == 'token', "return all tokens if mix_token is enabled" self.grad_checkpointing = False self.patch_embed = PatchEmbed( stem_conv=True, stem_stride=2, patch_size=patch_size, in_chans=in_chans, hidden_dim=stem_hidden_dim, embed_dim=embed_dims[0], ) r = patch_size # inital positional encoding, we add positional encoding after outlooker blocks patch_grid = (img_size[0] // patch_size // pooling_scale, img_size[1] // patch_size // pooling_scale) self.pos_embed = nn.Parameter(torch.zeros(1, patch_grid[0], patch_grid[1], embed_dims[-1])) self.pos_drop = nn.Dropout(p=pos_drop_rate) # set the main block in network self.stage_ends = [] self.feature_info = [] network = [] block_idx = 0 for i in range(len(layers)): if outlook_attention[i]: # stage 1 stage = outlooker_blocks( Outlooker, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, attn_drop=attn_drop_rate, norm_layer=norm_layer, ) else: # stage 2 stage = transformer_blocks( Transformer, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, drop_path_rate=drop_path_rate, attn_drop=attn_drop_rate, norm_layer=norm_layer, ) network.append(stage) self.stage_ends.append(block_idx) self.feature_info.append(dict(num_chs=embed_dims[i], reduction=r, module=f'network.{block_idx}')) block_idx += 1 if downsamples[i]: # downsampling between two stages network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2)) r *= 2 block_idx += 1 self.network = nn.ModuleList(network) # set post block, for example, class 
attention layers self.post_network = None if post_layers is not None: self.post_network = nn.ModuleList([ get_block( post_layers[i], dim=embed_dims[-1], num_heads=num_heads[-1], mlp_ratio=mlp_ratio[-1], qkv_bias=qkv_bias, attn_drop=attn_drop_rate, drop_path=0., norm_layer=norm_layer) for i in range(len(post_layers)) ]) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1])) trunc_normal_(self.cls_token, std=.02) # set output type if use_aux_head: self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() else: self.aux_head = None self.norm = norm_layer(self.num_features) # Classifier head self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.pos_embed, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[ (r'^network\.(\d+)\.(\d+)', None), (r'^network\.(\d+)', (0,)), ], blocks2=[ (r'^cls_token', (0,)), (r'^post_network\.(\d+)', None), (r'^norm', (99999,)) ], ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() if self.aux_head is not None: self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_tokens(self, x): for idx, block in 
enumerate(self.network): if idx == 2: # add positional encoding after outlooker blocks x = x + self.pos_embed x = self.pos_drop(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(block, x) else: x = block(x) B, H, W, C = x.shape x = x.reshape(B, -1, C) return x def forward_cls(self, x): B, N, C = x.shape cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat([cls_tokens, x], dim=1) for block in self.post_network: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(block, x) else: x = block(x) return x def forward_train(self, x): """ A separate forward fn for training with mix_token (if a train script supports). Combining multiple modes in as single forward with different return types is torchscript hell. """ x = self.patch_embed(x) x = x.permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C # mix token, see token labeling for details. if self.mix_token and self.training: lam = np.random.beta(self.beta, self.beta) patch_h, patch_w = x.shape[1] // self.pooling_scale, x.shape[2] // self.pooling_scale bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam, scale=self.pooling_scale) temp_x = x.clone() sbbx1, sbby1 = self.pooling_scale * bbx1, self.pooling_scale * bby1 sbbx2, sbby2 = self.pooling_scale * bbx2, self.pooling_scale * bby2 temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :] x = temp_x else: bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0 # step2: tokens learning in the two stages x = self.forward_tokens(x) # step3: post network, apply class attention or not if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) if self.global_pool == 'avg': x_cls = x.mean(dim=1) elif self.global_pool == 'token': x_cls = x[:, 0] else: x_cls = x if self.aux_head is None: return x_cls x_aux = self.aux_head(x[:, 1:]) # generate classes in all feature tokens, see token labeling if not self.training: return x_cls + 0.5 * x_aux.max(1)[0] if self.mix_token and self.training: # reverse "mix token", see token 
labeling for details. x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1]) temp_x = x_aux.clone() temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :] x_aux = temp_x x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1]) # return these: 1. class token, 2. classes from all feature tokens, 3. bounding box return x_cls, x_aux, (bbx1, bby1, bbx2, bby2) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output format must be NCHW.' 
intermediates = [] take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] # forward pass B, _, height, width = x.shape x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C # step2: tokens learning in the two stages if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript network = self.network else: network = self.network[:max_index + 1] for idx, block in enumerate(network): if idx == 2: # add positional encoding after outlooker blocks x = x + self.pos_embed x = self.pos_drop(x) x = block(x) if idx in take_indices: if norm and idx >= 2: x_inter = self.norm(x) else: x_inter = x intermediates.append(x_inter.permute(0, 3, 1, 2)) if intermediates_only: return intermediates # NOTE not supporting return of class tokens # step3: post network, apply class attention or not B, H, W, C = x.shape x = x.reshape(B, -1, C) if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] self.network = self.network[:max_index + 1] # truncate blocks if prune_norm: self.norm = nn.Identity() if prune_head: self.post_network = nn.ModuleList() # prune token blocks with head self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C # step2: tokens learning in the two stages x = self.forward_tokens(x) # step3: post network, apply class attention or not if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': out = x.mean(dim=1) elif self.global_pool == 'token': out = x[:, 0] else: out = x x = self.head_drop(x) if pre_logits: return out out = self.head(out) if self.aux_head is not None: # generate classes in all feature tokens, see token labeling aux = self.aux_head(x[:, 1:]) out = out + 0.5 * aux.max(1)[0] return out def forward(self, x): """ simplified forward (without mix token training) """ x = self.forward_features(x) x = self.forward_head(x) return x def _create_volo(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', 3) return build_model_with_cfg( VOLO, variant, pretrained, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .96, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv.0', 'classifier': ('head', 'aux_head'), **kwargs } default_cfgs = generate_default_cfgs({ 'volo_d1_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_224_84.2.pth.tar', crop_pct=0.96), 'volo_d1_384.sail_in1k': _cfg( hf_hub_id='timm/', 
url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_384_85.2.pth.tar', crop_pct=1.0, input_size=(3, 384, 384)), 'volo_d2_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_224_85.2.pth.tar', crop_pct=0.96), 'volo_d2_384.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_384_86.0.pth.tar', crop_pct=1.0, input_size=(3, 384, 384)), 'volo_d3_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_224_85.4.pth.tar', crop_pct=0.96), 'volo_d3_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_448_86.3.pth.tar', crop_pct=1.0, input_size=(3, 448, 448)), 'volo_d4_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_224_85.7.pth.tar', crop_pct=0.96), 'volo_d4_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_448_86.79.pth.tar', crop_pct=1.15, input_size=(3, 448, 448)), 'volo_d5_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_224_86.10.pth.tar', crop_pct=0.96), 'volo_d5_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_448_87.0.pth.tar', crop_pct=1.15, input_size=(3, 448, 448)), 'volo_d5_512.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_512_87.07.pth.tar', crop_pct=1.15, input_size=(3, 512, 512)), }) @register_model def volo_d1_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D1 model, Params: 27M """ model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) model = _create_volo('volo_d1_224', pretrained=pretrained, **model_args) return model @register_model def volo_d1_384(pretrained=False, **kwargs) -> VOLO: """ VOLO-D1 model, 
Params: 27M """ model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) model = _create_volo('volo_d1_384', pretrained=pretrained, **model_args) return model @register_model def volo_d2_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D2 model, Params: 59M """ model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d2_224', pretrained=pretrained, **model_args) return model @register_model def volo_d2_384(pretrained=False, **kwargs) -> VOLO: """ VOLO-D2 model, Params: 59M """ model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d2_384', pretrained=pretrained, **model_args) return model @register_model def volo_d3_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D3 model, Params: 86M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d3_224', pretrained=pretrained, **model_args) return model @register_model def volo_d3_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D3 model, Params: 86M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d3_448', pretrained=pretrained, **model_args) return model @register_model def volo_d4_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D4 model, Params: 193M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) model = _create_volo('volo_d4_224', pretrained=pretrained, **model_args) return model @register_model def volo_d4_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D4 model, Params: 193M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) model = _create_volo('volo_d4_448', pretrained=pretrained, 
**model_args) return model @register_model def volo_d5_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_224', pretrained=pretrained, **model_args) return model @register_model def volo_d5_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_448', pretrained=pretrained, **model_args) return model @register_model def volo_d5_512(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_512', pretrained=pretrained, **model_args) return model
pytorch-image-models/timm/models/volo.py/0
{ "file_path": "pytorch-image-models/timm/models/volo.py", "repo_id": "pytorch-image-models", "token_count": 17710 }
233
""" PyTorch MADGRAD optimizer MADGRAD: https://arxiv.org/abs/2101.11075 Code from: https://github.com/facebookresearch/madgrad """ # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import TYPE_CHECKING, Any, Callable, Optional import torch import torch.optim if TYPE_CHECKING: from torch.optim.optimizer import _params_t else: _params_t = Any class MADGRAD(torch.optim.Optimizer): """ MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic Optimization. .. _MADGRAD: https://arxiv.org/abs/2101.11075 MADGRAD is a general purpose optimizer that can be used in place of SGD or Adam may converge faster and generalize better. Currently GPU-only. Typically, the same learning rate schedule that is used for SGD or Adam may be used. The overall learning rate is not comparable to either method and should be determined by a hyper-parameter sweep. MADGRAD requires less weight decay than other methods, often as little as zero. Momentum values used for SGD or Adam's beta1 should work here also. On sparse problems both weight_decay and momentum should be set to 0. Arguments: params (iterable): Iterable of parameters to optimize or dicts defining parameter groups. lr (float): Learning rate (default: 1e-2). momentum (float): Momentum value in the range [0,1) (default: 0.9). weight_decay (float): Weight decay, i.e. a L2 penalty (default: 0). eps (float): Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6). 
""" def __init__( self, params: _params_t, lr: float = 1e-2, momentum: float = 0.9, weight_decay: float = 0, eps: float = 1e-6, decoupled_decay: bool = False, ): if momentum < 0 or momentum >= 1: raise ValueError(f"Momentum {momentum} must be in the range [0,1]") if lr <= 0: raise ValueError(f"Learning rate {lr} must be positive") if weight_decay < 0: raise ValueError(f"Weight decay {weight_decay} must be non-negative") if eps < 0: raise ValueError(f"Eps must be non-negative") defaults = dict( lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay) super().__init__(params, defaults) @property def supports_memory_efficient_fp16(self) -> bool: return False @property def supports_flat_params(self) -> bool: return True @torch.no_grad() def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: eps = group['eps'] lr = group['lr'] + eps weight_decay = group['weight_decay'] momentum = group['momentum'] ck = 1 - momentum for p in group["params"]: if p.grad is None: continue grad = p.grad if momentum != 0.0 and grad.is_sparse: raise RuntimeError("momentum != 0 is not compatible with sparse gradients") state = self.state[p] if len(state) == 0: state['step'] = 0 state['grad_sum_sq'] = torch.zeros_like(p) state['s'] = torch.zeros_like(p) if momentum != 0: state['x0'] = torch.clone(p).detach() state['step'] += 1 grad_sum_sq = state['grad_sum_sq'] s = state['s'] lamb = lr * math.sqrt(state['step']) # Apply weight decay if weight_decay != 0: if group['decoupled_decay']: p.mul_(1.0 - group['lr'] * weight_decay) else: if grad.is_sparse: raise RuntimeError("weight_decay option is not compatible with sparse gradients") grad.add_(p, alpha=weight_decay) if grad.is_sparse: grad = grad.coalesce() grad_val = grad._values() p_masked = p.sparse_mask(grad) grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) s_masked = s.sparse_mask(grad) # Compute x_0 from other known quantities rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) # Dense + sparse op grad_sq = grad * grad grad_sum_sq.add_(grad_sq, alpha=lamb) grad_sum_sq_masked.add_(grad_sq, alpha=lamb) rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) s.add_(grad, alpha=lamb) s_masked._values().add_(grad_val, alpha=lamb) # update masked copy of p p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) # Copy updated masked p to dense p using an add operation p_masked._values().add_(p_kp1_masked_vals, alpha=-1) p.add_(p_masked, alpha=-1) else: if momentum == 0: # Compute x_0 from other known quantities rms = grad_sum_sq.pow(1 / 3).add_(eps) x0 = p.addcdiv(s, 
rms, value=1) else: x0 = state['x0'] # Accumulate second moments grad_sum_sq.addcmul_(grad, grad, value=lamb) rms = grad_sum_sq.pow(1 / 3).add_(eps) # Update s s.add_(grad, alpha=lamb) # Step if momentum == 0: p.copy_(x0.addcdiv(s, rms, value=-1)) else: z = x0.addcdiv(s, rms, value=-1) # p is a moving average of z p.mul_(1 - ck).add_(z, alpha=ck) return loss
pytorch-image-models/timm/optim/madgrad.py/0
{ "file_path": "pytorch-image-models/timm/optim/madgrad.py", "repo_id": "pytorch-image-models", "token_count": 3505 }
234
""" Step Scheduler Basic step LR schedule with warmup, noise. Hacked together by / Copyright 2020 Ross Wightman """ import math import torch from typing import List from .scheduler import Scheduler class StepLRScheduler(Scheduler): """ """ def __init__( self, optimizer: torch.optim.Optimizer, decay_t: float, decay_rate: float = 1., warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.decay_t = decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t: int) -> List[float]: if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] return lrs
pytorch-image-models/timm/scheduler/step_lr.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/step_lr.py", "repo_id": "pytorch-image-models", "token_count": 951 }
235
from typing import Optional, Tuple, List

import torch


def onnx_forward(onnx_file, example_input):
    """Run a saved ONNX model on ``example_input`` via onnxruntime.

    Returns the first output tensor as a numpy array. Used below to validate
    that the exported graph reproduces the PyTorch model's output.
    """
    import onnxruntime

    sess_options = onnxruntime.SessionOptions()
    session = onnxruntime.InferenceSession(onnx_file, sess_options)
    input_name = session.get_inputs()[0].name
    output = session.run([], {input_name: example_input.numpy()})
    output = output[0]
    return output


def onnx_export(
        model: torch.nn.Module,
        output_file: str,
        example_input: Optional[torch.Tensor] = None,
        training: bool = False,
        verbose: bool = False,
        check: bool = True,
        check_forward: bool = False,
        batch_size: int = 64,
        input_size: Tuple[int, int, int] = None,
        opset: Optional[int] = None,
        dynamic_size: bool = False,
        aten_fallback: bool = False,
        keep_initializers: Optional[bool] = None,
        use_dynamo: bool = False,
        input_names: List[str] = None,
        output_names: List[str] = None,
):
    """Export ``model`` to ONNX at ``output_file``.

    If ``example_input`` is None, a random input is synthesized from
    ``input_size`` or the model's ``default_cfg``. When ``check`` is True the
    written file is validated with the onnx checker; when ``check_forward`` is
    also True (eval mode only) the ONNX output is numerically compared against
    the PyTorch output.
    """
    import onnx

    if training:
        training_mode = torch.onnx.TrainingMode.TRAINING
        model.train()
    else:
        training_mode = torch.onnx.TrainingMode.EVAL
        model.eval()

    if example_input is None:
        if not input_size:
            assert hasattr(model, 'default_cfg')
            input_size = model.default_cfg.get('input_size')
        example_input = torch.randn((batch_size,) + input_size, requires_grad=training)

    # Run model once before export trace, sets padding for models with Conv2dSameExport. This means
    # that the padding for models with Conv2dSameExport (most models with tf_ prefix) is fixed for
    # the input img_size specified in this script.

    # Opset >= 11 should allow for dynamic padding, however I cannot get it to work due to
    # issues in the tracing of the dynamic padding or errors attempting to export the model after jit
    # scripting it (an approach that should work). Perhaps in a future PyTorch or ONNX versions...
    with torch.no_grad():
        original_out = model(example_input)

    input_names = input_names or ["input0"]
    output_names = output_names or ["output0"]

    # Batch dim is always dynamic; spatial dims only if requested.
    dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}}
    if dynamic_size:
        dynamic_axes['input0'][2] = 'height'
        dynamic_axes['input0'][3] = 'width'

    if aten_fallback:
        export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
    else:
        export_type = torch.onnx.OperatorExportTypes.ONNX

    if use_dynamo:
        # New TorchDynamo-based exporter path; does not return a torch output
        # for comparison, so torch_out stays None below.
        export_options = torch.onnx.ExportOptions(dynamic_shapes=dynamic_size)
        export_output = torch.onnx.dynamo_export(
            model,
            example_input,
            export_options=export_options,
        )
        export_output.save(output_file)
        torch_out = None
    else:
        # NOTE: uses the private torch.onnx._export (returns the traced torch
        # output); the public torch.onnx.export does not return it.
        torch_out = torch.onnx._export(
            model,
            example_input,
            output_file,
            training=training_mode,
            export_params=True,
            verbose=verbose,
            input_names=input_names,
            output_names=output_names,
            keep_initializers_as_inputs=keep_initializers,
            dynamic_axes=dynamic_axes,
            opset_version=opset,
            operator_export_type=export_type
        )

    if check:
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model, full_check=True)  # assuming throw on error
        if check_forward and not training:
            import numpy as np
            onnx_out = onnx_forward(output_file, example_input)
            if torch_out is not None:
                # Loose tolerance vs ONNX runtime, tighter vs the traced graph.
                np.testing.assert_almost_equal(torch_out.numpy(), onnx_out, decimal=3)
                np.testing.assert_almost_equal(original_out.numpy(), torch_out.numpy(), decimal=5)
            else:
                np.testing.assert_almost_equal(original_out.numpy(), onnx_out, decimal=3)
pytorch-image-models/timm/utils/onnx.py/0
{ "file_path": "pytorch-image-models/timm/utils/onnx.py", "repo_id": "pytorch-image-models", "token_count": 1722 }
236
# Top-level Makefile: builds/installs the Python server, Rust router, launcher
# and benchmark, and runs the various test suites.

# --- install targets ---

install-server:
	cd server && make install

install-server-cpu:
	cd server && make install-server

install-router:
	cargo install --path backends/v3/

install-launcher:
	cargo install --path launcher/

install-benchmark:
	cargo install --path benchmark/

# Full install (GPU server) and CPU-only variant.
install: install-server install-router install-launcher

install-cpu: install-server-cpu install-router install-launcher

# --- dev loops ---

server-dev:
	cd server && make run-dev

router-dev:
	cd router && cargo run -- --port 8080

# --- tests ---

rust-tests: install-router install-launcher
	cargo test

install-integration-tests:
	cd integration-tests && pip install -r requirements.txt
	cd clients/python && pip install .

integration-tests: install-integration-tests
	pytest -s -vv -m "not private" integration-tests

# Regenerate pytest snapshots for the integration tests.
update-integration-tests: install-integration-tests
	pytest -s -vv --snapshot-update integration-tests

python-server-tests:
	HF_HUB_ENABLE_HF_TRANSFER=1 pytest -s -vv -m "not private" server/tests

python-client-tests:
	pytest clients/python/tests

python-tests: python-server-tests python-client-tests

# --- example model launchers ---

run-falcon-7b-instruct:
	text-generation-launcher --model-id tiiuae/falcon-7b-instruct --port 8080

run-falcon-7b-instruct-quantize:
	text-generation-launcher --model-id tiiuae/falcon-7b-instruct --quantize bitsandbytes --port 8080

clean:
	rm -rf target aml
text-generation-inference/Makefile/0
{ "file_path": "text-generation-inference/Makefile", "repo_id": "text-generation-inference", "token_count": 440 }
237
//! A crate to extract and inject a OpenTelemetry context from and to a gRPC request. //! Inspired by: https://github.com/open-telemetry/opentelemetry-rust gRPC examples use opentelemetry::global; use opentelemetry::propagation::Injector; use tracing_opentelemetry::OpenTelemetrySpanExt; /// Inject context in the metadata of a gRPC request. struct MetadataInjector<'a>(pub &'a mut tonic::metadata::MetadataMap); impl<'a> Injector for MetadataInjector<'a> { /// Set a key and value in the MetadataMap. Does nothing if the key or value are not valid inputs fn set(&mut self, key: &str, value: String) { if let Ok(key) = tonic::metadata::MetadataKey::from_bytes(key.as_bytes()) { if let Ok(val) = value.parse() { self.0.insert(key, val); } } } } /// Get a context from the global context and inject the span into a gRPC request's metadata. fn inject(metadata: &mut tonic::metadata::MetadataMap) { global::get_text_map_propagator(|propagator| { propagator.inject_context( &tracing::Span::current().context(), &mut MetadataInjector(metadata), ) }) } pub trait InjectTelemetryContext { fn inject_context(self) -> Self; } impl<T> InjectTelemetryContext for tonic::Request<T> { fn inject_context(mut self) -> Self { inject(self.metadata_mut()); self } }
text-generation-inference/backends/grpc-metadata/src/lib.rs/0
{ "file_path": "text-generation-inference/backends/grpc-metadata/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 548 }
238
use std::future::Future; use std::path::Path; use std::pin::{pin, Pin}; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, OnceLock}; use std::task::{Context, Poll}; use std::time::Duration; use async_trait::async_trait; use cxx::UniquePtr; use log::{error, warn}; use tokenizers::Tokenizer; use tokio::sync::mpsc::{unbounded_channel, UnboundedSender}; use tokio::time::{sleep, Instant}; use tokio_stream::wrappers::UnboundedReceiverStream; use tokio_stream::{Stream, StreamExt}; use tracing::{instrument, span, Level}; // use tokio::sync::RwLock; use parking_lot::RwLock; use text_generation_router::infer::{Backend, GeneratedText, InferError, InferStreamResponse}; use text_generation_router::validation::ValidationError::UnsupportedModality; use text_generation_router::validation::{Chunk, ValidGenerateRequest, ValidationError}; use text_generation_router::{FinishReason, Token}; use crate::errors::TensorRtLlmBackendError; use crate::ffi::{create_tensorrt_llm_backend, GenerationStep, TensorRtLlmBackendImpl}; // Value used to poll the state of the generation stream static POLLING_INTERVAL_US: OnceLock<u64> = OnceLock::new(); type InferResult<T> = Result<T, InferError>; pub(crate) struct Generation { executor: Arc<RwLock<UniquePtr<TensorRtLlmBackendImpl>>>, done: Arc<AtomicBool>, } /// Holds the user provided input to be executed along with a channel allowing /// to bubble up all the generated tokens for that tokens the to end stream. 
pub struct GenerationContext { sender: UnboundedSender<InferResult<InferStreamResponse>>, tokenizer: Arc<Tokenizer>, tokens: Vec<u32>, done: Arc<AtomicBool>, queued: Instant, start: Option<Instant>, } impl Stream for Generation { type Item = usize; fn poll_next(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let interval = POLLING_INTERVAL_US.get_or_init(|| { u64::from_str(option_env!("TRTLLM_BACKEND_POLLING_INTERVAL_US").unwrap_or("100")) .expect("Invalid value provided for envvar POLLING_INTERVAL_US") }); if !self.done.load(Ordering::Relaxed) { let backend = pin!(self.executor.read()); let status = match backend.poll(ctx) { Poll::Ready(executor_r) => { let ready = executor_r.num_responses_ready(); if ready == 0 { Poll::Pending } else { Poll::Ready(Some(ready)) } } Poll::Pending => Poll::Pending, }; let waker = ctx.waker().clone(); tokio::spawn(async { sleep(Duration::from_micros(*interval)).await; waker.wake(); }); status } else { Poll::Ready(None) // end of stream } } fn size_hint(&self) -> (usize, Option<usize>) { (1, None) } } unsafe impl Send for TensorRtLlmBackendImpl {} unsafe impl Sync for TensorRtLlmBackendImpl {} /// Implements the logic to execute generation with TensorRT-LLM executor API in background pub struct TensorRtLlmBackend { tokenizer: Arc<Tokenizer>, // Backing the backend behind a RwLock to allow concurrent read access to retrieve // the number of available tokens (read only) in the Generation stream backend: Arc<RwLock<UniquePtr<TensorRtLlmBackendImpl>>>, } impl TensorRtLlmBackend { pub fn new<P: AsRef<Path> + Send + 'static, PP: AsRef<Path> + Send + 'static>( tokenizer: Tokenizer, engine_folder: P, executor_worker_path: PP, ) -> Result<Self, TensorRtLlmBackendError> { Ok(TensorRtLlmBackend { tokenizer: Arc::new(tokenizer), backend: Arc::new(RwLock::new(create_tensorrt_llm_backend( engine_folder.as_ref().to_str().unwrap(), executor_worker_path.as_ref().to_str().unwrap(), ))), }) } fn validate(request: 
&ValidGenerateRequest) -> InferResult<&String> { if request.top_n_tokens > 1 { return Err(InferError::ValidationError( ValidationError::TopNTokensDisabled, )); } // TODO: Is it really needed? How can it be validated before? if request.parameters.grammar.is_some() { return Err(InferError::ValidationError(ValidationError::Grammar)); } match request.inputs.len() { 0 => Err(InferError::ValidationError(ValidationError::EmptyInput)), 2.. => Err(InferError::GenerationError( "TensorRT-LLM backend don't support multi-chunk".into(), )), 1 => match request.inputs.first().expect("Single item-chunk") { Chunk::Text(text) => Ok(text), Chunk::Image(_) => Err(InferError::ValidationError(UnsupportedModality("image"))), }, } } fn generate( &self, sender: UnboundedSender<InferResult<InferStreamResponse>>, tokens: Vec<u32>, top_k: u32, top_p: f32, temperature: f32, repetition_penalty: f32, frequency_penalty: f32, seed: u64, ) { let tokenizer = Arc::clone(&self.tokenizer); let executor = Arc::clone(&self.backend); // Let's push this in async context tokio::spawn(async move { // Define the generation state let mut generation = Generation { executor: executor.clone(), done: Arc::new(AtomicBool::new(false)), }; // Define the context over the generation // TODO(asap): Do we really need so many shared-ownership? let ctx = Box::new(GenerationContext { sender: sender.clone(), tokenizer, tokens: vec![], done: Arc::clone(&generation.done), start: None, queued: Instant::now(), }); // We are leaking the context on-purpose to avoid the box being dropped while there are // still computation ongoing // TODO(asap): Can we achieve the same with an Arc<Box<T>> without the need to go unsafe? 
let ctx_ = Box::leak(ctx); // Submit the request to the batcher let request_id = span!(Level::DEBUG, "submit") .in_scope(|| async { let mut handle = executor.write().await; let request_id = handle.pin_mut().submit( &tokens, top_k as i32, top_p, temperature, repetition_penalty, frequency_penalty, seed, ); request_id }) .await; while let Some(_) = generation.next().await { let mut executor_w = executor.write().await; let executor = executor_w.pin_mut(); span!(Level::DEBUG, "decode") .in_scope(|| async { unsafe { executor.stream_tokens( request_id, ctx_, |ctx: *mut GenerationContext, step: GenerationStep| { let inner_ctx = &mut *ctx; // Update the timestamp at which the request started effectively // Can be a bit off, would need to be before the callback, let's see inner_ctx.start.get_or_insert(Instant::now()); inner_ctx.done.store(step.is_final, Ordering::Relaxed); // Ensure we are not running into errors let parcel = if !step.has_error { // Insert the latest generated token to the tracker inner_ctx.tokens.push(step.token_id); // Decode the token let text = inner_ctx .tokenizer .decode(&[step.token_id], true) .expect("Failed to decode token"); let special = inner_ctx .tokenizer .get_added_vocabulary() .is_special_token(&text); // Create the structure holding the token let token = Token { id: step.token_id, text, logprob: step.log_prob, special, }; if step.is_final { let generated_text = inner_ctx .tokenizer .decode(&inner_ctx.tokens, true) .expect("Failed to decode generated_tokens"); Ok(InferStreamResponse::End { token, top_tokens: vec![], generated_text: GeneratedText { text: generated_text, generated_tokens: inner_ctx.tokens.len() as u32, finish_reason: FinishReason::EndOfSequenceToken, seed: None, }, start: inner_ctx.start.unwrap_or(Instant::now()), queued: inner_ctx.queued, }) } else { Ok(InferStreamResponse::Intermediate { token, top_tokens: vec![], }) } } else { error!("Error caught while decoding: {}", &step.error_msg); 
Err(InferError::GenerationError(step.error_msg)) }; // Send the parcel to the client inner_ctx .sender .send(parcel) .expect("Failed to sent msg through the channel"); }, ); } }) .await; } // "Properly" free the shared context... // TODO: clean that piece of sh** asap unsafe { let _ = Box::from_raw(ctx_); } }); } } #[async_trait] impl Backend for TensorRtLlmBackend { #[instrument(skip_all)] fn schedule( &self, request: ValidGenerateRequest, ) -> InferResult<UnboundedReceiverStream<InferResult<InferStreamResponse>>> { // Let's add a few more validation let input = TensorRtLlmBackend::validate(&request)?; // Channel to stream the generated token as they come from the worker thread back to the transport layer let (sender, receiver) = unbounded_channel(); // Unpack parameters let params = &request.parameters; // Preprocess the inputs to send to TRTLLM backend let encoding = self .tokenizer .encode(input.as_str(), true) .map_err(|e| InferError::GenerationError(e.to_string()))?; // Generate the response self.generate( sender, Vec::from(encoding.get_ids()), params.top_k, params.top_p, params.temperature, params.repetition_penalty, params.frequency_penalty, params.seed, ); Ok(UnboundedReceiverStream::new(receiver)) } async fn health(&self, _current_health: bool) -> bool { true } }
text-generation-inference/backends/trtllm/src/backend.rs/0
{ "file_path": "text-generation-inference/backends/trtllm/src/backend.rs", "repo_id": "text-generation-inference", "token_count": 7316 }
239
use crate::block_allocator::{BlockAllocation, BlockAllocator}; use crate::client; use crate::client::{ Batch, GrammarType, NextTokenChooserParameters, Request, StoppingCriteriaParameters, }; use nohash_hasher::{BuildNoHashHasher, IntMap}; use std::cmp::{max, min}; use std::collections::VecDeque; use text_generation_router::infer::InferError; use text_generation_router::infer::InferStreamResponse; use text_generation_router::validation::{ Chunk, ChunksToString, ValidGenerateRequest, ValidGrammar, ValidParameters, ValidStoppingParameters, }; use tokio::sync::{mpsc, oneshot}; use tokio::time::Instant; use tracing::{info_span, instrument, Instrument, Span}; /// Queue entry #[derive(Debug)] pub(crate) struct Entry { /// Request pub request: ValidGenerateRequest, /// Response sender to communicate between the Infer struct and the batching_task pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>, /// Span that will live as long as entry pub span: Span, /// Temporary span used as a guard when logging inference, wait times... 
pub temp_span: Option<Span>, /// Instant when this entry was queued pub queue_time: Instant, /// Instant when this entry was added to a batch pub batch_time: Option<Instant>, /// Block Allocation pub block_allocation: Option<BlockAllocation>, } /// Request Queue #[derive(Debug, Clone)] pub(crate) struct Queue { /// Channel to communicate with the background queue task queue_sender: mpsc::UnboundedSender<QueueCommand>, } impl Queue { pub(crate) fn new( requires_padding: bool, block_size: u32, prefix_caching: bool, window_size: Option<u32>, speculate: u32, max_batch_total_tokens: u32, ) -> Self { // Create channel let (queue_sender, queue_receiver) = mpsc::unbounded_channel(); // Launch background queue task tokio::spawn(queue_task( requires_padding, block_size, prefix_caching, window_size, speculate, max_batch_total_tokens, queue_receiver, )); Self { queue_sender } } /// Append an entry to the queue #[instrument(skip_all)] pub(crate) fn append(&self, entry: Entry) { // Send append command to the background task managing the state // Unwrap is safe here self.queue_sender .send(QueueCommand::Append(Box::new(entry), Span::current())) .unwrap(); } // Get the next batch #[instrument(skip(self))] pub(crate) async fn next_batch( &self, min_size: Option<usize>, max_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, ) -> Option<NextBatch> { // Create response channel let (response_sender, response_receiver) = oneshot::channel(); // Send next batch command to the background task managing the state // Unwrap is safe here self.queue_sender .send(QueueCommand::NextBatch { min_size, max_size, prefill_token_budget, token_budget, response_sender, span: Span::current(), }) .unwrap(); // Await on response channel // Unwrap is safe here response_receiver.await.unwrap() } } // Background task responsible of the queue state async fn queue_task( requires_padding: bool, block_size: u32, prefix_caching: bool, window_size: Option<u32>, speculate: u32, max_batch_total_tokens: 
u32, mut receiver: mpsc::UnboundedReceiver<QueueCommand>, ) { let mut state = State::new( requires_padding, block_size, prefix_caching, window_size, speculate, max_batch_total_tokens, ); while let Some(cmd) = receiver.recv().await { match cmd { QueueCommand::Append(entry, span) => { span.in_scope(|| state.append(*entry)); metrics::gauge!("tgi_queue_size").increment(1.0); } QueueCommand::NextBatch { min_size, max_size, prefill_token_budget, token_budget, response_sender, span, } => { let next_batch = state .next_batch(min_size, max_size, prefill_token_budget, token_budget) .instrument(span) .await; response_sender.send(next_batch).unwrap(); metrics::gauge!("tgi_queue_size").set(state.entries.len() as f64); } } } } /// Queue State #[derive(Debug)] struct State { /// Queue entries organized in a Vec entries: VecDeque<(u64, Entry)>, /// Id of the next entry next_id: u64, /// Id of the next batch next_batch_id: u64, /// Paged Attention block size block_size: u32, /// Sliding window window_size: Option<u32>, /// Speculation amount speculate: u32, /// Paged Attention Block Allocation block_allocator: Option<BlockAllocator>, } impl State { fn new( requires_padding: bool, block_size: u32, prefix_caching: bool, window_size: Option<u32>, speculate: u32, max_batch_total_tokens: u32, ) -> Self { let block_allocator = (!requires_padding).then(|| { BlockAllocator::new( max_batch_total_tokens, block_size, prefix_caching, window_size, ) }); Self { entries: VecDeque::with_capacity(128), next_id: 0, next_batch_id: 0, block_size, window_size, speculate, block_allocator, } } /// Append an entry to the queue fn append(&mut self, mut entry: Entry) { // Create a span that will live as long as the entry is in the queue waiting to be batched let queue_span = info_span!(parent: &entry.span, "queued"); entry.temp_span = Some(queue_span); // Push entry in the queue self.entries.push_back((self.next_id, entry)); self.next_id += 1; } // Get the next batch async fn next_batch( &mut self, 
min_size: Option<usize>, max_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, ) -> Option<NextBatch> { if self.entries.is_empty() { tracing::debug!("No queue"); return None; } // Check if we have enough entries if let Some(min_size) = min_size { if self.entries.len() < min_size { tracing::debug!("Not enough entries"); return None; } } if let Some(max_size) = max_size { if max_size == 0 { tracing::debug!("No capacity"); return None; } } // Pad prefill_token_budget to be a multiple of block size let prefill_token_budget = ((prefill_token_budget + self.block_size - 1) / self.block_size) * self.block_size; // Create span for this batch to add context to inference calls let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty); next_batch_span.follows_from(Span::current()); let mut batch_requests = Vec::with_capacity(self.entries.len()); let mut batch_entries = IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default()); let mut max_input_length = 0; let mut prefill_tokens: u32 = 0; let mut decode_tokens: u32 = 0; let mut max_blocks = 0; // Pop entries starting from the front of the queue 'entry_loop: while let Some((id, mut entry)) = self.entries.pop_front() { // Filter entries where the response receiver was dropped (== entries where the request // was dropped by the client) if entry.response_tx.is_closed() { metrics::counter!("tgi_request_failure", "err" => "dropped").increment(1); tracing::debug!("Dropping entry"); continue; } let block_allocation = match &self.block_allocator { None => { // We pad to max input length in the Python shards // We need to take these padding tokens into the equation max_input_length = max_input_length.max(entry.request.input_length); prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length; decode_tokens += entry.request.stopping_parameters.max_new_tokens; let total_tokens = prefill_tokens + decode_tokens + self.speculate; if prefill_tokens > 
prefill_token_budget || total_tokens > token_budget { // Entry is over budget // Add it back to the front tracing::debug!("Over budget: prefill_tokens={prefill_tokens} > {prefill_token_budget} || {prefill_tokens} + {decode_tokens} + {} > {token_budget}", self.speculate); self.entries.push_front((id, entry)); break 'entry_loop; } None } Some(block_allocator) => { prefill_tokens += entry.request.input_length; let max_new_tokens = match self.window_size { None => entry.request.stopping_parameters.max_new_tokens, Some(window_size) => min( window_size.saturating_sub(entry.request.input_length), entry.request.stopping_parameters.max_new_tokens, ), }; decode_tokens += max_new_tokens; if prefill_tokens > prefill_token_budget || (prefill_tokens + decode_tokens + self.speculate) > token_budget { // Entry is over budget // Add it back to the front tracing::debug!("Over budget: prefill_tokens={prefill_tokens} > {prefill_token_budget} || {prefill_tokens} + {decode_tokens} + {} > {token_budget}", self.speculate); self.entries.push_front((id, entry)); break; } let tokens = entry.request.input_length + entry.request.stopping_parameters.max_new_tokens + self.speculate - 1; // If users wants the prefill logprobs, we cannot reuse the cache. // So no input_ids for the radix tree. 
let input_ids = if entry.request.decoder_input_details { None } else { entry.request.input_ids.clone() }; match block_allocator.allocate(tokens, input_ids).await { None => { // Entry is over budget // Add it back to the front tracing::debug!("Over budget: not enough free blocks"); self.entries.push_front((id, entry)); break 'entry_loop; } Some(block_allocation) => { tracing::debug!("Allocation: {block_allocation:?}"); max_blocks = max(max_blocks, block_allocation.blocks.len() as u32); Some(block_allocation) } } } }; tracing::debug!("Accepting entry"); // Create a new span to link the batch back to this entry let entry_batch_span = info_span!(parent: &entry.span, "infer"); // Add relationships next_batch_span.follows_from(&entry_batch_span); entry_batch_span.follows_from(&next_batch_span); // Update entry entry.temp_span = Some(entry_batch_span); let (blocks, slots, prefix_len) = match &block_allocation { None => (Vec::new(), Vec::new(), 0), Some(block_allocation) => ( block_allocation.blocks.clone(), block_allocation.slots.clone(), block_allocation.prefix_len, ), }; entry.block_allocation = block_allocation; batch_requests.push(Request { id, prefill_logprobs: entry.request.decoder_input_details, input_chunks: Some(client::Input { chunks: entry .request .inputs .clone() .into_iter() .map(|c| client::InputChunk { chunk: Some(match c { Chunk::Text(text) => client::Chunk::Text(text), Chunk::Image(image) => client::Chunk::Image(client::Image { data: image.data, mimetype: image.mimetype, }), }), }) .collect(), }), inputs: entry.request.inputs.chunks_to_string(), truncate: entry.request.truncate, add_special_tokens: entry.request.add_special_tokens, parameters: Some(NextTokenChooserParameters::from( entry.request.parameters.clone(), )), stopping_parameters: Some(StoppingCriteriaParameters::from( entry.request.stopping_parameters.clone(), )), top_n_tokens: entry.request.top_n_tokens, blocks, slots, prefix_len, adapter_id: entry.request.adapter_id.clone(), }); // Set 
batch_time entry.batch_time = Some(Instant::now()); // Insert in batch_entries IntMap batch_entries.insert(id, entry); // Check if max_size if Some(batch_requests.len()) == max_size { break; } } // Empty batch if batch_requests.is_empty() { tracing::debug!("Filterered out all entries"); return None; } // Check if our batch is big enough if let Some(min_size) = min_size { // Batch is too small if batch_requests.len() < min_size { // Add back entries to the queue in the correct order for r in batch_requests.into_iter().rev() { let id = r.id; let entry = batch_entries.remove(&id).unwrap(); self.entries.push_front((id, entry)); } return None; } } // Final batch size let size = batch_requests.len() as u32; next_batch_span.record("batch_size", size); let batch = Batch { id: self.next_batch_id, requests: batch_requests, size, max_tokens: (prefill_tokens + decode_tokens), max_blocks, }; // Increment batch id self.next_batch_id += 1; metrics::histogram!("tgi_batch_next_size").record(batch.size as f64); Some((batch_entries, batch, next_batch_span)) } } type NextBatch = (IntMap<u64, Entry>, Batch, Span); #[derive(Debug)] enum QueueCommand { Append(Box<Entry>, Span), NextBatch { min_size: Option<usize>, max_size: Option<usize>, prefill_token_budget: u32, token_budget: u32, response_sender: oneshot::Sender<Option<NextBatch>>, span: Span, }, } impl From<ValidParameters> for NextTokenChooserParameters { fn from(value: ValidParameters) -> Self { let (grammar, grammar_type) = match value.grammar { None => (String::new(), GrammarType::None), Some(grammar) => match grammar { ValidGrammar::Json(grammar_string) => (grammar_string, GrammarType::Json), ValidGrammar::Regex(grammar_string) => (grammar_string, GrammarType::Regex), }, }; Self { temperature: value.temperature, top_k: value.top_k, top_p: value.top_p, typical_p: value.typical_p, do_sample: value.do_sample, seed: value.seed, repetition_penalty: value.repetition_penalty, frequency_penalty: value.frequency_penalty, watermark: 
value.watermark, grammar, grammar_type: grammar_type.into(), } } } impl From<ValidStoppingParameters> for StoppingCriteriaParameters { fn from(value: ValidStoppingParameters) -> Self { Self { max_new_tokens: value.max_new_tokens, stop_sequences: value.stop_sequences, ignore_eos_token: value.ignore_eos_token, } } } #[cfg(test)] mod tests { use std::sync::Arc; use super::*; use tracing::info_span; fn default_entry() -> ( Entry, mpsc::UnboundedReceiver<Result<InferStreamResponse, InferError>>, ) { let (response_tx, receiver_tx) = mpsc::unbounded_channel(); let entry = Entry { request: ValidGenerateRequest { inputs: vec![], input_ids: Some(Arc::new(vec![])), input_length: 0, add_special_tokens: true, truncate: 0, decoder_input_details: false, parameters: ValidParameters { temperature: 0.0, top_k: 0, top_p: 0.0, typical_p: 0.0, do_sample: false, seed: 0, repetition_penalty: 0.0, frequency_penalty: 0.0, watermark: false, grammar: None, }, stopping_parameters: ValidStoppingParameters { ignore_eos_token: false, max_new_tokens: 1, stop_sequences: vec![], }, top_n_tokens: 0, adapter_id: None, }, response_tx, span: info_span!("entry"), temp_span: None, queue_time: Instant::now(), batch_time: None, block_allocation: None, }; (entry, receiver_tx) } #[tokio::test] async fn test_append() { let mut state = State::new(false, 1, false, None, 0, 16); let (entry, _guard) = default_entry(); assert_eq!(state.next_id, 0); assert_eq!(state.entries.len(), 0); state.append(entry); assert_eq!(state.next_id, 1); assert_eq!(state.entries.len(), 1); let (id, _) = state.entries.remove(0).unwrap(); assert_eq!(id, 0); } #[tokio::test] async fn test_next_batch_empty() { let mut state = State::new(false, 1, false, None, 0, 16); assert!(state.next_batch(None, None, 1, 1).await.is_none()); assert!(state.next_batch(Some(1), None, 1, 1).await.is_none()); } #[tokio::test] async fn test_next_batch_min_size() { let mut state = State::new(false, 1, false, None, 0, 16); let (entry1, _guard1) = 
default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, None, 2, 2).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 0); assert_eq!(state.next_batch_id, 1); let (entry3, _guard3) = default_entry(); state.append(entry3); assert!(state.next_batch(Some(2), None, 2, 2).await.is_none()); assert_eq!(state.next_id, 3); assert_eq!(state.entries.len(), 1); let (id, _) = state.entries.remove(0).unwrap(); assert_eq!(id, 2); } #[tokio::test] async fn test_next_batch_max_size() { let mut state = State::new(false, 1, false, None, 0, 16); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, Some(1), 2, 2).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 1); assert_eq!(state.next_batch_id, 1); } #[tokio::test] async fn test_next_batch_token_budget() { let mut state = State::new(false, 1, false, None, 0, 2); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); state.append(entry1); state.append(entry2); let (entries, batch, _) = state.next_batch(None, None, 1, 1).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); assert_eq!(state.next_id, 2); assert_eq!(state.entries.len(), 1); assert_eq!(state.next_batch_id, 1); let (entry3, _guard3) = default_entry(); state.append(entry3); 
let (entries, batch, _) = state.next_batch(None, None, 3, 3).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); assert_eq!(state.next_id, 3); assert_eq!(state.entries.len(), 0); assert_eq!(state.next_batch_id, 2); } #[tokio::test] async fn test_queue_append() { let queue = Queue::new(false, 1, false, None, 0, 16); let (entry, _guard) = default_entry(); queue.append(entry); } #[tokio::test] async fn test_queue_next_batch_empty() { let queue = Queue::new(false, 1, false, None, 0, 16); assert!(queue.next_batch(None, None, 1, 1).await.is_none()); assert!(queue.next_batch(Some(1), None, 1, 1).await.is_none()); } #[tokio::test] async fn test_queue_next_batch_min_size() { let queue = Queue::new(false, 1, false, None, 0, 16); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, None, 2, 2).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); let (entry3, _guard3) = default_entry(); queue.append(entry3); // Not enough requests pending assert!(queue.next_batch(Some(2), None, 2, 2).await.is_none()); // Not enough token budget assert!(queue.next_batch(Some(1), None, 0, 0).await.is_none()); // Ok let (entries2, batch2, _) = queue.next_batch(Some(1), None, 2, 2).await.unwrap(); assert_eq!(entries2.len(), 1); assert!(entries2.contains_key(&2)); assert!(entries2.get(&2).unwrap().batch_time.is_some()); assert_eq!(batch2.id, 1); assert_eq!(batch2.size, 1); } #[tokio::test] async fn test_queue_next_batch_max_size() { let queue = Queue::new(false, 1, false, None, 0, 16); let (entry1, _guard1) = default_entry(); let (entry2, 
_guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, Some(1), 2, 2).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); } #[tokio::test] async fn test_queue_next_batch_token_budget() { let queue = Queue::new(false, 1, false, None, 0, 16); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, None, 1, 1).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); let (entry3, _guard3) = default_entry(); queue.append(entry3); let (entries, batch, _) = queue.next_batch(None, None, 3, 3).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); } #[tokio::test] async fn test_queue_next_batch_token_speculate() { let queue = Queue::new(false, 1, false, None, 2, 16); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); // Budget of 1 is not enough assert!(queue.next_batch(None, None, 1, 1).await.is_none()); let (entries, batch, _) = queue.next_batch(None, None, 6, 6).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); } #[tokio::test] async fn test_queue_next_batch_dropped_receiver() { let queue = Queue::new(false, 1, false, None, 0, 16); let (entry, _) = default_entry(); queue.append(entry); assert!(queue.next_batch(None, None, 1, 1).await.is_none()); } }
text-generation-inference/backends/v3/src/queue.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/queue.rs", "repo_id": "text-generation-inference", "token_count": 13802 }
240
import pytest from text_generation import __version__ from huggingface_hub.utils import build_hf_headers @pytest.fixture def flan_t5_xxl(): return "google/flan-t5-xxl" @pytest.fixture def llama_7b(): return "meta-llama/Llama-2-7b-chat-hf" @pytest.fixture def fake_model(): return "fake/model" @pytest.fixture def unsupported_model(): return "gpt2" @pytest.fixture def base_url(): return "https://api-inference.huggingface.co/models" @pytest.fixture def bloom_url(base_url, bloom_model): return f"{base_url}/{bloom_model}" @pytest.fixture def flan_t5_xxl_url(base_url, flan_t5_xxl): return f"{base_url}/{flan_t5_xxl}" @pytest.fixture def llama_7b_url(base_url, llama_7b): return f"{base_url}/{llama_7b}" @pytest.fixture def fake_url(base_url, fake_model): return f"{base_url}/{fake_model}" @pytest.fixture def unsupported_url(base_url, unsupported_model): return f"{base_url}/{unsupported_model}" @pytest.fixture(scope="session") def hf_headers(): return build_hf_headers( library_name="text-generation-tests", library_version=__version__ )
text-generation-inference/clients/python/tests/conftest.py/0
{ "file_path": "text-generation-inference/clients/python/tests/conftest.py", "repo_id": "text-generation-inference", "token_count": 479 }
241
# Serving Private & Gated Models If the model you wish to serve is behind gated access or the model repository on Hugging Face Hub is private, and you have access to the model, you can provide your Hugging Face Hub access token. You can generate and copy a read token from [Hugging Face Hub tokens page](https://huggingface.co/settings/tokens) If you're using the CLI, set the `HF_TOKEN` environment variable. For example: ``` export HF_TOKEN=<YOUR READ TOKEN> ``` If you would like to do it through Docker, you can provide your token by specifying `HF_TOKEN` as shown below. ```bash model=meta-llama/Llama-2-7b-chat-hf volume=$PWD/data token=<your READ token> docker run --gpus all \ --shm-size 1g \ -e HF_TOKEN=$token \ -p 8080:80 \ -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.0.4 \ --model-id $model ```
text-generation-inference/docs/source/basic_tutorials/gated_model_access.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/gated_model_access.md", "repo_id": "text-generation-inference", "token_count": 290 }
242
# Streaming ## What is Streaming? Token streaming is the mode in which the server returns the tokens one by one as the model generates them. This enables showing progressive generations to the user rather than waiting for the whole generation. Streaming is an essential aspect of the end-user experience as it reduces latency, one of the most critical aspects of a smooth experience. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual_360.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual-dark_360.gif" /> </div> With token streaming, the server can start returning the tokens one by one before having to generate the whole response. Users can have a sense of the generation's quality before the end of the generation. This has different positive effects: * Users can get results orders of magnitude earlier for extremely long queries. * Seeing something in progress allows users to stop the generation if it's not going in the direction they expect. * Perceived latency is lower when results are shown in the early stages. * When used in conversational UIs, the experience feels more natural. For example, a system can generate 100 tokens per second. If the system generates 1000 tokens, with the non-streaming setup, users need to wait 10 seconds to get results. On the other hand, with the streaming setup, users get initial results immediately, and although end-to-end latency will be the same, they can see half of the generation after five seconds. Below you can see an interactive demo that shows non-streaming vs streaming side-by-side. Click **generate** below. 
<div class="block dark:hidden"> <iframe src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=light" width="850" height="350" ></iframe> </div> <div class="hidden dark:block"> <iframe src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=dark" width="850" height="350" ></iframe> </div> ## How to use Streaming? ### Streaming with Python To stream tokens with `InferenceClient`, simply pass `stream=True` and iterate over the response. ```python from huggingface_hub import InferenceClient client = InferenceClient(base_url="http://127.0.0.1:8080") output = client.chat.completions.create( messages=[ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Count to 10"}, ], stream=True, max_tokens=1024, ) for chunk in output: print(chunk.choices[0].delta.content) # 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 ``` The `huggingface_hub` library also comes with an `AsyncInferenceClient` in case you need to handle the requests concurrently. ```python from huggingface_hub import AsyncInferenceClient client = AsyncInferenceClient(base_url="http://127.0.0.1:8080") async def main(): stream = await client.chat.completions.create( messages=[{"role": "user", "content": "Say this is a test"}], stream=True, ) async for chunk in stream: print(chunk.choices[0].delta.content or "", end="") asyncio.run(main()) # This # is # a # test #. ``` ### Streaming with cURL To use the OpenAI Chat Completions compatible Messages API `v1/chat/completions` endpoint with curl, you can add the `-N` flag, which disables curl default buffering and shows data as it arrives from the server ```curl curl localhost:8080/v1/chat/completions \ -X POST \ -d '{ "model": "tgi", "messages": [ { "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is deep learning?" 
} ], "stream": true, "max_tokens": 20 }' \ -H 'Content-Type: application/json' ``` ### Streaming with JavaScript First, we need to install the `@huggingface/inference` library. `npm install @huggingface/inference` If you're using the free Inference API, you can use `HfInference`. If you're using inference endpoints, you can use `HfInferenceEndpoint`. We can create a `HfInferenceEndpoint` providing our endpoint URL and credential. ```js import { HfInferenceEndpoint } from '@huggingface/inference' const hf = new HfInferenceEndpoint('https://YOUR_ENDPOINT.endpoints.huggingface.cloud', 'hf_YOUR_TOKEN') // prompt const prompt = 'What can you do in Nuremberg, Germany? Give me 3 Tips' const stream = hf.textGenerationStream({ inputs: prompt }) for await (const r of stream) { // yield the generated token process.stdout.write(r.token.text) } ``` ## How does Streaming work under the hood? Under the hood, TGI uses Server-Sent Events (SSE). In an SSE Setup, a client sends a request with the data, opening an HTTP connection and subscribing to updates. Afterward, the server sends data to the client. There is no need for further requests; the server will keep sending the data. SSEs are unidirectional, meaning the client does not send other requests to the server. SSE sends data over HTTP, making it easy to use. SSEs are different than: * Polling: where the client keeps calling the server to get data. This means that the server might return empty responses and cause overhead. * Webhooks: where there is a bi-directional connection. The server can send information to the client, but the client can also send data to the server after the first request. Webhooks are more complex to operate as they don’t only use HTTP. If there are too many requests at the same time, TGI returns an HTTP Error with an `overloaded` error type (`huggingface_hub` returns `OverloadedError`). 
This allows the client to manage the overloaded server (e.g., it could display a busy error to the user or retry with a new request). To configure the maximum number of concurrent requests, you can specify `--max_concurrent_requests`, allowing clients to handle backpressure.
text-generation-inference/docs/source/conceptual/streaming.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/streaming.md", "repo_id": "text-generation-inference", "token_count": 1890 }
243
{ inputs = { crate2nix = { url = "github:nix-community/crate2nix"; inputs.nixpkgs.follows = "tgi-nix/nixpkgs"; }; nix-filter.url = "github:numtide/nix-filter"; tgi-nix.url = "github:danieldk/tgi-nix"; nixpkgs.follows = "tgi-nix/nixpkgs"; flake-utils.url = "github:numtide/flake-utils"; rust-overlay = { url = "github:oxalica/rust-overlay"; inputs.nixpkgs.follows = "tgi-nix/nixpkgs"; }; }; outputs = { self, crate2nix, nix-filter, nixpkgs, flake-utils, rust-overlay, tgi-nix, }: flake-utils.lib.eachDefaultSystem ( system: let cargoNix = crate2nix.tools.${system}.appliedCargoNix { name = "tgi"; src = ./.; additionalCargoNixArgs = [ "--all-features" ]; }; pkgs = import nixpkgs { inherit system; inherit (tgi-nix.lib) config; overlays = [ rust-overlay.overlays.default tgi-nix.overlays.default ]; }; crateOverrides = import ./nix/crate-overrides.nix { inherit pkgs nix-filter; }; benchmark = cargoNix.workspaceMembers.text-generation-benchmark.build.override { inherit crateOverrides; }; launcher = cargoNix.workspaceMembers.text-generation-launcher.build.override { inherit crateOverrides; }; router = cargoNix.workspaceMembers.text-generation-router-v3.build.override { inherit crateOverrides; }; server = pkgs.python3.pkgs.callPackage ./nix/server.nix { inherit nix-filter; }; in { devShells = with pkgs; rec { default = pure; pure = mkShell { buildInputs = [ benchmark launcher router server ]; }; impure = mkShell { buildInputs = [ openssl.dev pkg-config (rust-bin.stable.latest.default.override { extensions = [ "rust-analyzer" "rust-src" ]; }) protobuf ] ++ (with python3.pkgs; [ venvShellHook pip ipdb ]); inputsFrom = [ server ]; venvDir = "./.venv"; postVenv = '' unset SOURCE_DATE_EPOCH ''; postShellHook = '' unset SOURCE_DATE_EPOCH export PATH=$PATH:~/.cargo/bin ''; }; }; packages.default = pkgs.writeShellApplication { name = "text-generation-inference"; runtimeInputs = [ server router ]; text = '' ${launcher}/bin/text-generation-launcher "$@" ''; }; } ); }
text-generation-inference/flake.nix/0
{ "file_path": "text-generation-inference/flake.nix", "repo_id": "text-generation-inference", "token_count": 1705 }
244
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 2061, "logprob": null, "text": "What" }, { "id": 318, "logprob": -3.1835938, "text": " is" }, { "id": 2769, "logprob": -9.171875, "text": " deep" }, { "id": 4673, "logprob": -1.6425781, "text": " learning" }, { "id": 30, "logprob": -0.7314453, "text": "?" } ], "seed": null, "tokens": [ { "id": 198, "logprob": -0.68603516, "special": false, "text": "\n" }, { "id": 198, "logprob": -0.005393982, "special": false, "text": "\n" }, { "id": 29744, "logprob": -0.31079102, "special": false, "text": "Deep" }, { "id": 4673, "logprob": -0.08300781, "special": false, "text": " learning" }, { "id": 318, "logprob": -0.58984375, "special": false, "text": " is" }, { "id": 257, "logprob": -0.953125, "special": false, "text": " a" }, { "id": 649, "logprob": -2.0957031, "special": false, "text": " new" }, { "id": 2214, "logprob": -1.8095703, "special": false, "text": " field" }, { "id": 286, "logprob": -1.0673828, "special": false, "text": " of" }, { "id": 2267, "logprob": -0.9375, "special": false, "text": " research" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new field of research" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gpt2/test_flash_gpt2.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gpt2/test_flash_gpt2.json", "repo_id": "text-generation-inference", "token_count": 1172 }
245
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1247, "logprob": -2.390625, "text": "User" }, { "id": 28747, "logprob": -12.328125, "text": ":" }, { "id": 32000, "logprob": -10.4375, "text": "<image>" }, { "id": 32000, "logprob": -10.6953125, "text": "<image>" }, { "id": 32000, "logprob": -15.828125, "text": "<image>" }, { "id": 32000, "logprob": -10.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -15.4921875, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -10.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.1796875, "text": "<image>" }, { "id": 32000, "logprob": -10.2421875, "text": "<image>" }, { "id": 32000, "logprob": -10.4609375, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -10.515625, "text": "<image>" }, { "id": 32000, "logprob": -10.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -17.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.3984375, "text": "<image>" }, { "id": 32000, 
"logprob": -10.390625, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -17.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -9.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.7578125, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -10.5, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -10.578125, "text": "<image>" }, { "id": 32000, "logprob": -10.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -18.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.015625, "text": "<image>" }, { "id": 32000, "logprob": -15.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -10.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 
32000, "logprob": -10.5703125, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.2734375, "text": "<image>" }, { "id": 32000, "logprob": -10.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -10.5234375, "text": "<image>" }, { "id": 32000, "logprob": -15.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -10.375, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.1484375, "text": "<image>" }, { "id": 32000, "logprob": -10.71875, "text": "<image>" }, { "id": 32000, "logprob": -10.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { 
"id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.6640625, "text": "<image>" }, { "id": 32000, "logprob": -10.7265625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -9.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -15.1484375, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": 
"<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -15.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.4609375, "text": "<image>" }, { "id": 32000, "logprob": -17.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": 
"<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.3359375, "text": "<image>" }, { "id": 32000, "logprob": -10.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -10.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -10.296875, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.3515625, "text": "<image>" }, { "id": 32000, "logprob": -10.609375, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": 
-10.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.6015625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": 
-10.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -10.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": 
-11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -15.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 
32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -14.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": 
"<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -16.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -15.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -16.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.2265625, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -14.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -10.3359375, "text": 
"<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -10.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, 
"text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -15.3359375, "text": "<image>" }, { "id": 32000, "logprob": 
-11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -15.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -16.0, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.8984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, 
"logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.375, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -15.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, 
"logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -15.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -15.7734375, "text": "<image>" }, { "id": 32000, "logprob": -15.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -16.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -16.109375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { 
"id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -16.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -9.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -14.859375, "text": "<image>" }, { "id": 32000, "logprob": -14.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -15.203125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -15.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -9.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" 
}, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.0859375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.421875, 
"text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -10.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -15.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": 
-14.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -10.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -13.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -17.0625, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.5, "text": "<image>" }, { "id": 32000, "logprob": -13.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -14.15625, "text": "<image>" }, { "id": 32000, "logprob": -16.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, 
"text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -14.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.4921875, "text": "<image>" }, { "id": 32000, "logprob": -18.15625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -16.3125, "text": "<image>" }, { "id": 32000, "logprob": -14.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -15.375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -15.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -16.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": 
-13.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": 
-14.578125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.78125, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -15.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -15.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 
32000, "logprob": -10.5078125, "text": "<image>" }, { "id": 32000, "logprob": -10.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.8671875, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -16.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": 
"<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -14.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": 
-12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -15.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -15.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -15.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": 
-13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, 
"logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -14.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.75, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.46875, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" 
}, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.0, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -13.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.1484375, "text": "<image>" }, { 
"id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -15.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -14.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.6640625, "text": "<image>" }, { 
"id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" 
}, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.3984375, "text": "<image>" }, { "id": 32000, "logprob": -15.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -14.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" 
}, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -13.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -14.1171875, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": 
"<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.65625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, 
"text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.0, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -10.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -13.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.640625, "text": "<image>" }, { "id": 32000, "logprob": -15.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 
32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, 
"logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 
32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.7578125, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { 
"id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -14.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -14.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" 
}, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": 
"<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -14.546875, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -10.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -13.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, 
"text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": 
-11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -15.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.6875, "text": "<image>" }, { "id": 32000, "logprob": 
-12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -14.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.125, "text": "<image>" }, { "id": 32000, 
"logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 
32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { 
"id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": 
"<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -15.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": 
-11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -15.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -14.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -15.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": 
-11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -10.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { 
"id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.171875, "text": "<image>" }, { "id": 32000, "logprob": -14.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -14.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { 
"id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -10.609375, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -10.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -9.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" 
}, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -15.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -15.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": 
"<image>" }, { "id": 32000, "logprob": -14.21875, "text": "<image>" }, { "id": 32000, "logprob": -14.796875, "text": "<image>" }, { "id": 32000, "logprob": -17.5625, "text": "<image>" }, { "id": 32000, "logprob": -15.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, 
"text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -14.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.203125, "text": "<image>" }, { "id": 32000, "logprob": 
-11.78125, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -14.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -13.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -15.1640625, "text": "<image>" }, { "id": 32000, 
"logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -14.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.8984375, "text": "<image>" }, { "id": 32000, "logprob": -15.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -14.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { 
"id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.0234375, "text": "<image>" }, { "id": 32000, "logprob": -15.2578125, "text": "<image>" }, { "id": 32000, "logprob": -17.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": 
"<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.4609375, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -15.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": 
-11.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -10.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -10.5625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -16.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 
32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -15.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -16.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -13.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -15.6484375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -10.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" 
}, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -15.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -16.8125, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" 
}, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -17.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.515625, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 
32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -14.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -17.09375, "text": "<image>" }, { "id": 32000, "logprob": -15.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -15.1171875, "text": "<image>" }, { "id": 32000, "logprob": -15.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.7578125, "text": "<image>" 
}, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -10.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -17.0, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -13.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1953125, "text": "<image>" }, { "id": 32000, "logprob": -17.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { 
"id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -13.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, 
"logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -16.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.8125, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -15.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -17.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -9.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -14.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, 
"logprob": -11.015625, "text": "<image>" }, { "id": 32000, "logprob": -16.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -17.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 
32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -17.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -15.2578125, "text": "<image>" }, { "id": 32000, "logprob": -16.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -17.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -14.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { 
"id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -16.21875, "text": "<image>" }, { "id": 32000, "logprob": -16.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -14.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -17.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -15.046875, "text": "<image>" }, { "id": 32000, "logprob": -16.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" 
}, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -15.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -16.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -14.546875, "text": "<image>" }, { "id": 32000, "logprob": -19.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -17.640625, "text": "<image>" }, { "id": 32000, "logprob": -17.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" 
}, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -10.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -14.5, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -16.875, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { 
"id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -16.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -17.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 
32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -16.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -14.125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 
32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3984375, "text": "<image>" }, { "id": 32000, "logprob": -13.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -16.375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -16.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -16.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, 
"logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -15.8203125, "text": "<image>" }, { "id": 32000, "logprob": -17.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -16.75, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -15.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -16.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, 
"logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -18.65625, "text": "<image>" }, { "id": 32000, "logprob": -15.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -15.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { 
"id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -16.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -16.5625, "text": "<image>" }, { "id": 32000, "logprob": -15.171875, "text": "<image>" }, { "id": 32000, "logprob": -14.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" 
}, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -16.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.1171875, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -17.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -14.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -15.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -14.9375, "text": "<image>" }, { "id": 32000, "logprob": -15.6875, "text": "<image>" }, { "id": 32000, "logprob": -16.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.125, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": 
"<image>" }, { "id": 32000, "logprob": -14.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -15.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -14.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -19.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, 
"text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -16.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -15.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -15.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -15.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.546875, "text": "<image>" }, { "id": 
32000, "logprob": -16.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -14.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -15.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -15.0625, "text": "<image>" }, { "id": 32000, "logprob": -15.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": 
"<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -15.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -16.375, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -14.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, 
"text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -16.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -17.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -16.0, "text": "<image>" }, { "id": 32000, "logprob": -16.625, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 2418, "logprob": -19.0625, "text": "Can" }, { "id": 368, "logprob": -0.19604492, "text": "you" }, { "id": 1912, "logprob": -1.5058594, "text": "tell" }, { "id": 528, "logprob": -0.31030273, "text": "me" }, { "id": 264, "logprob": -2.6367188, "text": "a" }, { "id": 1215, "logprob": -9.1015625, "text": "very" }, { "id": 2485, "logprob": -0.9975586, "text": "short" }, { "id": 2838, "logprob": -0.4633789, "text": "story" }, { "id": 2818, "logprob": -3.3144531, "text": "based" }, { "id": 356, "logprob": -0.029037476, "text": "on" }, { "id": 272, "logprob": -0.9902344, "text": "the" }, { "id": 3469, "logprob": -0.2890625, "text": "image" }, { "id": 28804, "logprob": -0.42895508, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20812988, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2587891, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.20825195, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017709732, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1247, "logprob": -2.390625, "text": "User" }, { "id": 28747, "logprob": -12.328125, "text": ":" }, { "id": 32000, "logprob": -10.4375, "text": "<image>" }, { "id": 32000, "logprob": -10.6953125, "text": "<image>" }, { "id": 32000, "logprob": -15.828125, "text": "<image>" }, { "id": 32000, "logprob": -10.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -15.4921875, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -10.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.1796875, "text": "<image>" }, { "id": 32000, "logprob": -10.2421875, "text": "<image>" }, { "id": 32000, "logprob": -10.4609375, "text": "<image>" }, { "id": 
32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -10.515625, "text": "<image>" }, { "id": 32000, "logprob": -10.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -17.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.3984375, "text": "<image>" }, { "id": 32000, "logprob": -10.390625, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -17.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -9.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.7578125, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -10.5, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { 
"id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -10.578125, "text": "<image>" }, { "id": 32000, "logprob": -10.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -18.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.015625, "text": "<image>" }, { "id": 32000, "logprob": -15.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -10.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.5703125, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.2734375, "text": "<image>" }, { "id": 32000, "logprob": -10.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -10.5234375, "text": "<image>" }, { "id": 32000, "logprob": -15.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -10.375, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" 
}, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.1484375, "text": "<image>" }, { "id": 32000, "logprob": -10.71875, "text": "<image>" }, { "id": 32000, "logprob": -10.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.6640625, "text": "<image>" }, { "id": 32000, "logprob": -10.7265625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -9.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": 
"<image>" }, { "id": 32000, "logprob": -10.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -15.1484375, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -15.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, 
"text": "<image>" }, { "id": 32000, "logprob": -10.4609375, "text": "<image>" }, { "id": 32000, "logprob": -17.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.3359375, "text": "<image>" }, { "id": 32000, "logprob": -10.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -10.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -10.296875, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.3515625, "text": "<image>" }, { "id": 32000, "logprob": -10.609375, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.6015625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, 
"logprob": -10.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -10.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -10.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, 
"logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -15.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { 
"id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, 
"text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -14.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -16.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -15.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": 
-12.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -16.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -14.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": 
-13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -10.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -10.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, 
"logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 
32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -15.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -15.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -16.0, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.8984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 
32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.375, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" 
}, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -15.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -15.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -15.7734375, "text": "<image>" }, { "id": 32000, "logprob": -15.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -16.234375, "text": 
"<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -16.109375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -16.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -9.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -14.859375, "text": "<image>" }, { "id": 32000, "logprob": -14.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -15.203125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": 
-13.125, "text": "<image>" }, { "id": 32000, "logprob": -15.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -9.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.0859375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, 
"logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -10.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 
32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -15.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -10.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -13.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -17.0625, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.5, "text": "<image>" }, { "id": 32000, "logprob": -13.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -14.15625, "text": "<image>" }, { "id": 32000, "logprob": -16.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -14.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.4921875, "text": "<image>" }, { "id": 32000, "logprob": -18.15625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 
32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -16.3125, "text": "<image>" }, { "id": 32000, "logprob": -14.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -15.375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -15.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -16.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" 
}, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.78125, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -15.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" 
}, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -15.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.5078125, "text": "<image>" }, { "id": 32000, "logprob": -10.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.453125, "text": 
"<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.8671875, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -16.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -14.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": 
-11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -15.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, 
"logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -15.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -15.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 
32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -14.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": 
"<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.75, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.46875, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.0, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": 
"<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -13.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, 
"text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -15.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -14.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": 
-12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.3984375, "text": "<image>" }, { "id": 32000, "logprob": -15.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, 
"logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -14.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -13.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -14.1171875, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 
32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { 
"id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.65625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.0, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": 
"<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -10.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -13.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.640625, "text": "<image>" }, { "id": 32000, "logprob": -15.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, 
"logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.75, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.7578125, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, 
"logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -14.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 
32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -14.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, 
{ "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -14.546875, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -10.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -13.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": 
"<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, 
"text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": 
-12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -15.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -14.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, 
"logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.125, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 
32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 
32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" 
}, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -15.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": 
"<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -15.09375, 
"text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -14.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -15.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": 
-10.65625, "text": "<image>" }, { "id": 32000, "logprob": -10.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.171875, "text": "<image>" }, { "id": 32000, "logprob": -14.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, 
"logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -14.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -10.609375, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -10.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 
32000, "logprob": -9.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -15.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { 
"id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -15.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -14.21875, "text": "<image>" }, { "id": 32000, "logprob": -14.796875, "text": "<image>" }, { "id": 32000, "logprob": -17.5625, "text": "<image>" }, { "id": 32000, "logprob": -15.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, 
{ "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -14.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" 
}, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.90625, 
"text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -14.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -13.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -15.1640625, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -14.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.8984375, "text": "<image>" }, { "id": 32000, "logprob": -15.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, 
"text": "<image>" }, { "id": 32000, "logprob": -14.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.0234375, "text": "<image>" }, { "id": 32000, "logprob": -15.2578125, "text": "<image>" }, { "id": 32000, "logprob": -17.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, 
"logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.4609375, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -15.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -10.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, 
"text": "<image>" }, { "id": 32000, "logprob": -10.5625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -16.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -15.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -16.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -13.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": 
-10.65625, "text": "<image>" }, { "id": 32000, "logprob": -15.6484375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -10.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, 
"logprob": -15.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -16.8125, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -17.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.515625, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, 
"logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -14.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -17.09375, "text": "<image>" }, { "id": 32000, "logprob": -15.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -15.1171875, "text": "<image>" }, { "id": 32000, 
"logprob": -15.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -10.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -17.0, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -13.421875, "text": "<image>" }, { "id": 32000, 
"logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1953125, "text": "<image>" }, { "id": 32000, "logprob": -17.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, 
"logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -13.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -16.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.8125, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -15.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -17.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, 
"logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -9.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -14.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.015625, "text": "<image>" }, { "id": 32000, "logprob": -16.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, 
"logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -17.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -17.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -15.2578125, "text": "<image>" }, { "id": 32000, "logprob": -16.984375, "text": "<image>" }, { "id": 32000, 
"logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -17.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -14.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -16.21875, "text": "<image>" }, { "id": 32000, "logprob": -16.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -14.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -17.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -15.046875, "text": "<image>" }, { "id": 32000, "logprob": -16.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { 
"id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -15.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -16.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { 
"id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -14.546875, "text": "<image>" }, { "id": 32000, "logprob": -19.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -17.640625, "text": "<image>" }, { "id": 32000, "logprob": -17.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -10.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -14.5, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 
32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -16.875, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -16.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { 
"id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -17.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -16.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -14.125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3984375, "text": "<image>" }, { "id": 32000, "logprob": -13.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -16.375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, 
"logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -16.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -16.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -15.8203125, "text": "<image>" }, { "id": 32000, "logprob": -17.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -16.75, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -15.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -16.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 
32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -18.65625, "text": "<image>" }, { "id": 32000, "logprob": -15.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -15.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { 
"id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -16.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -16.5625, "text": "<image>" }, { "id": 32000, "logprob": -15.171875, "text": "<image>" }, { "id": 32000, "logprob": -14.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" 
}, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -16.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.1171875, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -17.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.625, 
"text": "<image>" }, { "id": 32000, "logprob": -14.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -15.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -14.9375, "text": "<image>" }, { "id": 32000, "logprob": -15.6875, "text": "<image>" }, { "id": 32000, "logprob": -16.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.125, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -15.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -14.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5, 
"text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -19.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -16.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -15.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, 
"logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -15.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -15.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.546875, "text": "<image>" }, { "id": 32000, "logprob": -16.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -14.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": 
"<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -15.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -15.0625, "text": "<image>" }, { "id": 32000, "logprob": -15.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -15.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -16.375, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, 
"text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -14.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -16.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -17.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -16.0, "text": "<image>" }, { "id": 32000, "logprob": -16.625, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, 
"text": "<image>" }, { "id": 2418, "logprob": -19.0625, "text": "Can" }, { "id": 368, "logprob": -0.19604492, "text": "you" }, { "id": 1912, "logprob": -1.5058594, "text": "tell" }, { "id": 528, "logprob": -0.31030273, "text": "me" }, { "id": 264, "logprob": -2.6367188, "text": "a" }, { "id": 1215, "logprob": -9.1015625, "text": "very" }, { "id": 2485, "logprob": -0.9975586, "text": "short" }, { "id": 2838, "logprob": -0.4633789, "text": "story" }, { "id": 2818, "logprob": -3.3144531, "text": "based" }, { "id": 356, "logprob": -0.029037476, "text": "on" }, { "id": 272, "logprob": -0.9902344, "text": "the" }, { "id": 3469, "logprob": -0.2890625, "text": "image" }, { "id": 28804, "logprob": -0.42895508, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20275879, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2578125, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.2084961, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017738342, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1247, "logprob": -2.390625, "text": "User" }, { "id": 28747, "logprob": -12.328125, "text": ":" }, { "id": 32000, "logprob": -10.4375, "text": "<image>" }, { "id": 32000, "logprob": -10.6953125, "text": "<image>" }, { "id": 32000, "logprob": -15.828125, "text": "<image>" }, { 
"id": 32000, "logprob": -10.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -15.4921875, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -10.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.1796875, "text": "<image>" }, { "id": 32000, "logprob": -10.2421875, "text": "<image>" }, { "id": 32000, "logprob": -10.4609375, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -10.515625, "text": "<image>" }, { "id": 32000, "logprob": -10.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -17.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.3984375, "text": "<image>" }, { "id": 32000, "logprob": -10.390625, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -17.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -9.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.7578125, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, 
"text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -10.5, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -10.578125, "text": "<image>" }, { "id": 32000, "logprob": -10.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -18.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.015625, "text": "<image>" }, { "id": 32000, "logprob": -15.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -10.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.5703125, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.2734375, "text": "<image>" }, { "id": 32000, "logprob": -10.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, 
"text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -10.5234375, "text": "<image>" }, { "id": 32000, "logprob": -15.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -10.375, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.1484375, "text": "<image>" }, { "id": 32000, "logprob": -10.71875, "text": "<image>" }, { "id": 32000, "logprob": -10.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.6640625, "text": "<image>" }, { "id": 32000, "logprob": -10.7265625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": 
-11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -9.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -15.1484375, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, 
"logprob": -15.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.4609375, "text": "<image>" }, { "id": 32000, "logprob": -17.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.3359375, "text": "<image>" }, { "id": 32000, "logprob": -10.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.6796875, "text": "<image>" }, { "id": 
32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -10.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -10.296875, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.3515625, "text": "<image>" }, { "id": 32000, "logprob": -10.609375, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": 
"<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.6015625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -10.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": 
"<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -10.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, 
"text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -15.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, 
"logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -14.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -16.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 
32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -15.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -16.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 
32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -14.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -10.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -10.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { 
"id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -15.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -15.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -16.0, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.8984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": 
"<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.375, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -15.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -15.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, 
"text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -15.7734375, "text": "<image>" }, { "id": 32000, "logprob": -15.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -16.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -16.109375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.5078125, "text": "<image>" }, { "id": 32000, 
"logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -16.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -9.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -14.859375, "text": "<image>" }, { "id": 32000, "logprob": -14.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -15.203125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, 
"logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -15.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -9.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { 
"id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.0859375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" 
}, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -10.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -15.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -10.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.7109375, 
"text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -13.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -17.0625, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.5, "text": "<image>" }, { "id": 32000, "logprob": -13.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -14.15625, "text": "<image>" }, { "id": 32000, "logprob": -16.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -14.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": 
"<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.4921875, "text": "<image>" }, { "id": 32000, "logprob": -18.15625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -16.3125, "text": "<image>" }, { "id": 32000, "logprob": -14.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -15.375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -15.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -16.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.5, 
"text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.78125, 
"text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -15.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -15.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.5078125, "text": "<image>" }, { "id": 32000, "logprob": -10.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, 
"logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.8671875, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -16.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { 
"id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -14.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": 
"<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -15.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -15.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -15.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": 
"<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": 
-14.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.75, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.46875, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.0, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.4296875, "text": "<image>" }, { "id": 32000, 
"logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -13.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 
32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -15.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -14.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { 
"id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.3984375, "text": "<image>" }, { "id": 32000, "logprob": -15.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": 
"<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -14.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": 
"<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -13.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -14.1171875, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, 
"text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.65625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.0, "text": "<image>" }, { "id": 32000, "logprob": 
-14.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -10.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -13.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 
32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.640625, "text": "<image>" }, { "id": 32000, "logprob": -15.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": 
"<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { 
"id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, 
{ "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.7578125, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": 
"<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -14.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -14.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": 
"<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": 
-14.546875, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -10.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -13.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, 
"logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { 
"id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -15.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -14.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { 
"id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.125, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.3359375, "text": 
"<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, 
"text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, 
"text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -15.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": 
-13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, 
"logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -15.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -14.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -15.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, 
"logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -10.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.5703125, "text": "<image>" }, { 
"id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.171875, "text": "<image>" }, { "id": 32000, "logprob": -14.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -14.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -10.609375, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { 
"id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -10.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -9.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -15.671875, "text": 
"<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -15.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -14.21875, "text": "<image>" }, { "id": 32000, "logprob": -14.796875, "text": "<image>" }, { "id": 32000, "logprob": -17.5625, "text": "<image>" }, { "id": 32000, "logprob": -15.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, 
"text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": 
-12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -14.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, 
"logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -14.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -13.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -15.1640625, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -14.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.8984375, "text": "<image>" }, { "id": 32000, "logprob": -15.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 
32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -14.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" 
}, { "id": 32000, "logprob": -14.0234375, "text": "<image>" }, { "id": 32000, "logprob": -15.2578125, "text": "<image>" }, { "id": 32000, "logprob": -17.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, 
"text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.4609375, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -15.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, 
"logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -10.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -10.5625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -16.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -15.796875, "text": "<image>" }, { 
"id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -16.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -13.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -15.6484375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -10.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": 
"<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -15.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -16.8125, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -17.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": 
"<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.515625, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -14.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": 
"<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -17.09375, "text": "<image>" }, { "id": 32000, "logprob": -15.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -15.1171875, "text": "<image>" }, { "id": 32000, "logprob": -15.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -10.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, 
"text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -17.0, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -13.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1953125, "text": "<image>" }, { "id": 32000, "logprob": -17.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.6015625, 
"text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -13.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -16.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, 
"text": "<image>" }, { "id": 32000, "logprob": -14.8125, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -15.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -17.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -9.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -14.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.015625, "text": "<image>" }, { "id": 32000, "logprob": -16.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": 
"<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -17.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.875, 
"text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -17.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -15.2578125, "text": "<image>" }, { "id": 32000, "logprob": -16.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -17.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -14.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -16.21875, "text": "<image>" }, { "id": 32000, "logprob": -16.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -14.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": 
-13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -17.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -15.046875, "text": "<image>" }, { "id": 32000, "logprob": -16.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": 
-11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -15.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -16.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -14.546875, "text": "<image>" }, { "id": 32000, "logprob": -19.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -17.640625, "text": "<image>" }, { "id": 32000, "logprob": -17.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -10.5625, "text": "<image>" }, { "id": 32000, 
"logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -14.5, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -16.875, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, 
"logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -16.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -17.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, 
"logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -16.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -14.125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3984375, "text": "<image>" }, { "id": 
32000, "logprob": -13.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -16.375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -16.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -16.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -15.8203125, "text": "<image>" }, { "id": 32000, "logprob": -17.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, 
"logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -16.75, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -15.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -16.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, 
"logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -18.65625, "text": "<image>" }, { "id": 32000, "logprob": -15.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -15.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 
32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -16.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -16.5625, "text": "<image>" }, { "id": 32000, "logprob": -15.171875, "text": "<image>" }, { "id": 32000, "logprob": -14.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -16.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, 
{ "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.1171875, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -17.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -14.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -15.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -14.9375, "text": "<image>" }, { "id": 32000, "logprob": -15.6875, "text": "<image>" }, { "id": 32000, "logprob": -16.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.125, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -15.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": 
"<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -14.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -19.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -16.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": 
-13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -15.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -15.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -15.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.546875, "text": "<image>" }, { "id": 32000, "logprob": -16.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { 
"id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -14.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -15.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -15.0625, "text": "<image>" }, { "id": 32000, "logprob": -15.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -15.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, 
"text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -16.375, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -14.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.40625, 
"text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -16.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -17.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -16.0, "text": "<image>" }, { "id": 32000, "logprob": -16.625, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 2418, "logprob": -19.0625, "text": "Can" }, { "id": 368, "logprob": -0.19604492, "text": "you" }, { "id": 1912, "logprob": -1.5058594, "text": "tell" }, { "id": 528, "logprob": -0.31030273, "text": "me" }, { "id": 264, "logprob": -2.6367188, "text": "a" }, { "id": 1215, "logprob": -9.1015625, "text": "very" }, { "id": 2485, "logprob": -0.9975586, "text": "short" }, { "id": 2838, "logprob": -0.4633789, "text": "story" }, { "id": 2818, "logprob": -3.3144531, "text": "based" }, { "id": 356, "logprob": -0.029037476, "text": "on" }, { "id": 272, "logprob": -0.9902344, "text": "the" }, { "id": 3469, "logprob": -0.2890625, "text": "image" }, { "id": 28804, "logprob": -0.42895508, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20275879, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2578125, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.2084961, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017738342, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1247, "logprob": -2.390625, "text": "User" }, { "id": 28747, "logprob": -12.328125, "text": ":" }, { "id": 32000, "logprob": -10.4375, "text": "<image>" }, { "id": 32000, "logprob": -10.6953125, "text": "<image>" }, { "id": 32000, "logprob": -15.828125, "text": "<image>" }, { "id": 32000, "logprob": -10.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -15.4921875, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -10.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.1796875, "text": "<image>" }, { "id": 32000, "logprob": -10.2421875, "text": "<image>" }, { "id": 32000, "logprob": -10.4609375, "text": "<image>" }, { "id": 32000, 
"logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -10.515625, "text": "<image>" }, { "id": 32000, "logprob": -10.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -17.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.3984375, "text": "<image>" }, { "id": 32000, "logprob": -10.390625, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -17.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -9.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.7578125, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -10.5, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 
32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -10.578125, "text": "<image>" }, { "id": 32000, "logprob": -10.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -18.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.015625, "text": "<image>" }, { "id": 32000, "logprob": -15.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -10.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.5703125, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.2734375, "text": "<image>" }, { "id": 32000, "logprob": -10.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.8046875, "text": "<image>" }, { "id": 32000, "logprob": -10.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -10.5234375, "text": "<image>" }, { "id": 32000, "logprob": -15.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -10.375, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { 
"id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.1484375, "text": "<image>" }, { "id": 32000, "logprob": -10.71875, "text": "<image>" }, { "id": 32000, "logprob": -10.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.6640625, "text": "<image>" }, { "id": 32000, "logprob": -10.7265625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -9.984375, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": 
"<image>" }, { "id": 32000, "logprob": -10.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -15.1484375, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -15.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, 
"text": "<image>" }, { "id": 32000, "logprob": -10.4609375, "text": "<image>" }, { "id": 32000, "logprob": -17.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -10.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.3359375, "text": "<image>" }, { "id": 32000, "logprob": -10.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -10.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -10.296875, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -10.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.3515625, "text": "<image>" }, { "id": 32000, "logprob": -10.609375, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -10.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.6015625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, 
"logprob": -10.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -10.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -10.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, 
"logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -15.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { 
"id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, 
"text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -14.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -16.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -15.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": 
-12.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -16.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -14.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": 
-13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -10.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -10.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, 
"logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 
32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -15.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -15.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -16.0, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.8984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 
32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.375, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" 
}, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -15.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -15.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -15.7734375, "text": "<image>" }, { "id": 32000, "logprob": -15.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -16.234375, "text": 
"<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -16.109375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -16.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -9.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -14.859375, "text": "<image>" }, { "id": 32000, "logprob": -14.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -15.203125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": 
-13.125, "text": "<image>" }, { "id": 32000, "logprob": -15.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -9.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.0859375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, 
"logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -10.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 
32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -15.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -10.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -13.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -17.0625, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -10.5, "text": "<image>" }, { "id": 32000, "logprob": -13.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -14.15625, "text": "<image>" }, { "id": 32000, "logprob": -16.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -14.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -14.4921875, "text": "<image>" }, { "id": 32000, "logprob": -18.15625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 
32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -16.3125, "text": "<image>" }, { "id": 32000, "logprob": -14.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -15.375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -15.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -16.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" 
}, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.78125, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -15.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" 
}, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.8671875, "text": "<image>" }, { "id": 32000, "logprob": -15.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.5078125, "text": "<image>" }, { "id": 32000, "logprob": -10.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.453125, "text": 
"<image>" }, { "id": 32000, "logprob": -14.6875, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.8671875, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -16.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -14.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -10.84375, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": 
-11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -15.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, 
"logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -15.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -15.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 
32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -14.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -14.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": 
"<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -13.75, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.46875, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.0, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": 
"<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -13.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -14.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -13.8671875, 
"text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -15.6484375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -14.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": 
-12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -13.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -14.3984375, "text": "<image>" }, { "id": 32000, "logprob": -15.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, 
"logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -14.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -13.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -14.1171875, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 
32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { 
"id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.65625, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.2265625, "text": "<image>" }, { "id": 32000, "logprob": -14.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -14.0, "text": "<image>" }, { "id": 32000, "logprob": -14.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": 
"<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -10.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -13.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.640625, "text": "<image>" }, { "id": 32000, "logprob": -15.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.0859375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, 
"logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.25, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.1875, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": 
-11.75, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.1015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.7578125, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.2421875, "text": "<image>" }, { "id": 32000, 
"logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -14.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 
32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -14.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, 
{ "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -14.546875, "text": "<image>" }, { "id": 32000, "logprob": -14.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -10.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -13.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": 
"<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, 
"text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -13.40625, "text": "<image>" }, { "id": 32000, "logprob": -14.3828125, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.84375, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": 
-12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -15.296875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -13.6796875, "text": "<image>" }, { "id": 32000, "logprob": -14.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, 
"logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.125, "text": "<image>" }, { "id": 32000, "logprob": -11.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.3359375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.328125, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 
32000, "logprob": -12.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 
32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" 
}, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.0078125, "text": "<image>" }, { "id": 32000, "logprob": -15.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -14.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": 
"<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -10.40625, "text": "<image>" }, { "id": 32000, "logprob": -13.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -15.09375, 
"text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -14.421875, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -14.8046875, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -15.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -10.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.875, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": 
-10.65625, "text": "<image>" }, { "id": 32000, "logprob": -10.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.171875, "text": "<image>" }, { "id": 32000, "logprob": -14.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, 
"logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -14.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.0078125, "text": "<image>" }, { "id": 32000, "logprob": -10.609375, "text": "<image>" }, { "id": 32000, "logprob": -10.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.3828125, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -10.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 
32000, "logprob": -9.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -14.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -15.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.0, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.921875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { 
"id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -15.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -14.21875, "text": "<image>" }, { "id": 32000, "logprob": -14.796875, "text": "<image>" }, { "id": 32000, "logprob": -17.5625, "text": "<image>" }, { "id": 32000, "logprob": -15.234375, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.1015625, "text": "<image>" }, { "id": 32000, "logprob": -10.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, 
{ "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -15.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.8671875, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -14.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -14.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" 
}, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.296875, "text": "<image>" }, { "id": 32000, "logprob": -14.1484375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.90625, 
"text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -14.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -13.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -15.1640625, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -14.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -14.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.8984375, "text": "<image>" }, { "id": 32000, "logprob": -15.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { "id": 32000, "logprob": -10.625, "text": "<image>" }, { "id": 32000, "logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, 
"text": "<image>" }, { "id": 32000, "logprob": -14.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.0234375, "text": "<image>" }, { "id": 32000, "logprob": -15.2578125, "text": "<image>" }, { "id": 32000, "logprob": -17.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, 
"logprob": -13.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.1484375, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.9921875, "text": "<image>" }, { "id": 32000, "logprob": -12.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.4609375, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -14.2890625, "text": "<image>" }, { "id": 32000, "logprob": -11.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -15.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.4296875, "text": "<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.28125, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -10.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.3515625, 
"text": "<image>" }, { "id": 32000, "logprob": -10.5625, "text": "<image>" }, { "id": 32000, "logprob": -10.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.3828125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -16.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -15.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -13.125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -16.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -13.734375, "text": "<image>" }, { "id": 32000, "logprob": -14.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": 
-10.65625, "text": "<image>" }, { "id": 32000, "logprob": -15.6484375, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -10.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.96875, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -10.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.1484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -10.7421875, "text": "<image>" }, { "id": 32000, "logprob": -10.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, 
"logprob": -15.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -16.8125, "text": "<image>" }, { "id": 32000, "logprob": -15.765625, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -13.109375, "text": "<image>" }, { "id": 32000, "logprob": -14.5078125, "text": "<image>" }, { "id": 32000, "logprob": -14.5546875, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -17.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -12.484375, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.515625, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -14.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, 
"logprob": -11.25, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -14.28125, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -13.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.25, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.546875, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -10.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.140625, "text": "<image>" }, { "id": 32000, "logprob": -14.21875, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -17.09375, "text": "<image>" }, { "id": 32000, "logprob": -15.4375, "text": "<image>" }, { "id": 32000, "logprob": -11.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -13.234375, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -15.1171875, "text": "<image>" }, { "id": 32000, 
"logprob": -15.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6796875, "text": "<image>" }, { "id": 32000, "logprob": -10.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -13.1328125, "text": "<image>" }, { "id": 32000, "logprob": -10.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.5234375, "text": "<image>" }, { "id": 32000, "logprob": -17.0, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -13.421875, "text": "<image>" }, { "id": 32000, 
"logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.46875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.2421875, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.5, "text": "<image>" }, { "id": 32000, "logprob": -10.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -13.1953125, "text": "<image>" }, { "id": 32000, "logprob": -17.171875, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -10.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -16.125, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -14.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.78125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, 
"logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.375, "text": "<image>" }, { "id": 32000, "logprob": -13.9375, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -10.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -11.0703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -12.1171875, "text": "<image>" }, { "id": 32000, "logprob": -16.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -13.4140625, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.4921875, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -14.8125, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -15.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -10.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -17.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.640625, "text": "<image>" }, { "id": 32000, 
"logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.5, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -9.796875, "text": "<image>" }, { "id": 32000, "logprob": -10.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -14.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -14.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -13.0, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.015625, "text": "<image>" }, { "id": 32000, "logprob": -16.421875, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -16.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.96875, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -14.046875, "text": "<image>" }, { "id": 32000, 
"logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -17.203125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -13.375, "text": "<image>" }, { "id": 32000, "logprob": -14.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -15.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.9296875, "text": "<image>" }, { "id": 32000, "logprob": -10.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -14.7734375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -11.2578125, "text": "<image>" }, { "id": 32000, "logprob": -10.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -17.015625, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -13.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -15.2578125, "text": "<image>" }, { "id": 32000, "logprob": -16.984375, "text": "<image>" }, { "id": 32000, 
"logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -10.6875, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -17.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -14.4609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -12.828125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.484375, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -16.21875, "text": "<image>" }, { "id": 32000, "logprob": -16.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -14.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -12.984375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -17.59375, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -15.046875, "text": "<image>" }, { "id": 32000, "logprob": -16.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.359375, "text": "<image>" }, { 
"id": 32000, "logprob": -12.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.3125, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.5859375, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -13.21875, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.8359375, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -15.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.25, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -16.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.3125, "text": "<image>" }, { "id": 32000, "logprob": -12.3203125, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -14.2734375, "text": "<image>" }, { 
"id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.8125, "text": "<image>" }, { "id": 32000, "logprob": -14.546875, "text": "<image>" }, { "id": 32000, "logprob": -19.4375, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -13.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -17.640625, "text": "<image>" }, { "id": 32000, "logprob": -17.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -13.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -13.484375, "text": "<image>" }, { "id": 32000, "logprob": -10.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.2890625, "text": "<image>" }, { "id": 32000, "logprob": -14.34375, "text": "<image>" }, { "id": 32000, "logprob": -14.5, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.75, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 
32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.046875, "text": "<image>" }, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -16.875, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.7890625, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -12.796875, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -14.4765625, "text": "<image>" }, { "id": 32000, "logprob": -10.0625, "text": "<image>" }, { "id": 32000, "logprob": -12.0234375, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.8125, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.6640625, "text": "<image>" }, { "id": 32000, "logprob": -12.0078125, "text": "<image>" }, { "id": 32000, "logprob": -12.1640625, "text": "<image>" }, { "id": 32000, "logprob": -14.875, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -12.4375, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.1328125, "text": "<image>" }, { "id": 32000, "logprob": -16.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { 
"id": 32000, "logprob": -14.625, "text": "<image>" }, { "id": 32000, "logprob": -10.9375, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -17.09375, "text": "<image>" }, { "id": 32000, "logprob": -13.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -11.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.9921875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.328125, "text": "<image>" }, { "id": 32000, "logprob": -11.9375, "text": "<image>" }, { "id": 32000, "logprob": -13.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -12.859375, "text": "<image>" }, { "id": 32000, "logprob": -11.078125, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -12.6328125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.4140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -16.546875, "text": "<image>" }, { "id": 32000, "logprob": -13.15625, "text": "<image>" }, { "id": 32000, "logprob": -11.0546875, "text": "<image>" }, { "id": 32000, "logprob": -13.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.921875, "text": "<image>" }, { "id": 32000, "logprob": -12.84375, "text": "<image>" }, 
{ "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -11.46875, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -14.125, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -14.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.640625, "text": "<image>" }, { "id": 32000, "logprob": -10.75, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -13.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -15.1953125, "text": "<image>" }, { "id": 32000, "logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -13.453125, "text": "<image>" }, { "id": 32000, "logprob": -15.5078125, "text": "<image>" }, { "id": 32000, "logprob": -12.3984375, "text": "<image>" }, { "id": 32000, "logprob": -13.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.1875, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -12.21875, "text": "<image>" }, { "id": 32000, "logprob": -12.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -16.375, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -11.9609375, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -15.0, "text": "<image>" }, { "id": 32000, 
"logprob": -11.640625, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -16.03125, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5859375, "text": "<image>" }, { "id": 32000, "logprob": -15.3359375, "text": "<image>" }, { "id": 32000, "logprob": -12.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.40625, "text": "<image>" }, { "id": 32000, "logprob": -16.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.625, "text": "<image>" }, { "id": 32000, "logprob": -12.109375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -15.8203125, "text": "<image>" }, { "id": 32000, "logprob": -17.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.09375, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.53125, "text": "<image>" }, { "id": 32000, "logprob": -11.140625, "text": "<image>" }, { "id": 32000, "logprob": -16.75, "text": "<image>" }, { "id": 32000, "logprob": -13.0625, "text": "<image>" }, { "id": 32000, "logprob": -14.90625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -15.1640625, "text": "<image>" }, { "id": 32000, "logprob": -11.828125, "text": "<image>" }, { "id": 32000, "logprob": -11.4140625, "text": "<image>" }, { "id": 32000, "logprob": -11.28125, "text": "<image>" }, { "id": 32000, "logprob": -16.953125, "text": "<image>" }, { "id": 32000, "logprob": -13.0546875, "text": "<image>" }, { "id": 
32000, "logprob": -14.890625, "text": "<image>" }, { "id": 32000, "logprob": -12.1328125, "text": "<image>" }, { "id": 32000, "logprob": -11.375, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.34375, "text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -13.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.6875, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -12.578125, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.40625, "text": "<image>" }, { "id": 32000, "logprob": -11.65625, "text": "<image>" }, { "id": 32000, "logprob": -11.8046875, "text": "<image>" }, { "id": 32000, "logprob": -12.9453125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -13.5234375, "text": "<image>" }, { "id": 32000, "logprob": -11.9765625, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -11.765625, "text": "<image>" }, { "id": 32000, "logprob": -12.421875, "text": "<image>" }, { "id": 32000, "logprob": -18.65625, "text": "<image>" }, { "id": 32000, "logprob": -15.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -13.6953125, "text": "<image>" }, { "id": 32000, "logprob": -15.0859375, "text": "<image>" }, { "id": 32000, "logprob": -14.0703125, "text": "<image>" }, { "id": 32000, "logprob": -14.53125, "text": "<image>" }, { "id": 32000, "logprob": -12.359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -13.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.8203125, "text": "<image>" }, { "id": 32000, "logprob": -13.046875, "text": "<image>" }, { 
"id": 32000, "logprob": -13.078125, "text": "<image>" }, { "id": 32000, "logprob": -12.625, "text": "<image>" }, { "id": 32000, "logprob": -11.3203125, "text": "<image>" }, { "id": 32000, "logprob": -14.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -13.8515625, "text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.703125, "text": "<image>" }, { "id": 32000, "logprob": -10.65625, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.234375, "text": "<image>" }, { "id": 32000, "logprob": -11.8984375, "text": "<image>" }, { "id": 32000, "logprob": -11.7578125, "text": "<image>" }, { "id": 32000, "logprob": -12.890625, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -14.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.125, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.53125, "text": "<image>" }, { "id": 32000, "logprob": -10.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.2265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -16.6875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -12.8203125, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -16.5625, "text": "<image>" }, { "id": 32000, "logprob": -15.171875, "text": "<image>" }, { "id": 32000, "logprob": -14.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.03125, "text": "<image>" }, { "id": 32000, "logprob": -12.9375, "text": "<image>" 
}, { "id": 32000, "logprob": -12.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.7421875, "text": "<image>" }, { "id": 32000, "logprob": -13.3515625, "text": "<image>" }, { "id": 32000, "logprob": -13.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.5, "text": "<image>" }, { "id": 32000, "logprob": -12.140625, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.640625, "text": "<image>" }, { "id": 32000, "logprob": -13.6171875, "text": "<image>" }, { "id": 32000, "logprob": -13.796875, "text": "<image>" }, { "id": 32000, "logprob": -12.7109375, "text": "<image>" }, { "id": 32000, "logprob": -13.2578125, "text": "<image>" }, { "id": 32000, "logprob": -12.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -11.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -16.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.171875, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -15.1171875, "text": "<image>" }, { "id": 32000, "logprob": -15.4453125, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.4921875, "text": "<image>" }, { "id": 32000, "logprob": -10.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.15625, "text": "<image>" }, { "id": 32000, "logprob": -12.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.5546875, "text": "<image>" }, { "id": 32000, "logprob": -12.734375, "text": "<image>" }, { "id": 32000, "logprob": -17.953125, "text": "<image>" }, { "id": 32000, "logprob": -11.59375, "text": "<image>" }, { "id": 32000, "logprob": -11.625, 
"text": "<image>" }, { "id": 32000, "logprob": -14.921875, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.8515625, "text": "<image>" }, { "id": 32000, "logprob": -15.3046875, "text": "<image>" }, { "id": 32000, "logprob": -12.171875, "text": "<image>" }, { "id": 32000, "logprob": -12.7265625, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -14.9375, "text": "<image>" }, { "id": 32000, "logprob": -15.6875, "text": "<image>" }, { "id": 32000, "logprob": -16.015625, "text": "<image>" }, { "id": 32000, "logprob": -14.125, "text": "<image>" }, { "id": 32000, "logprob": -13.2265625, "text": "<image>" }, { "id": 32000, "logprob": -13.1640625, "text": "<image>" }, { "id": 32000, "logprob": -12.703125, "text": "<image>" }, { "id": 32000, "logprob": -11.5625, "text": "<image>" }, { "id": 32000, "logprob": -14.765625, "text": "<image>" }, { "id": 32000, "logprob": -14.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.71875, "text": "<image>" }, { "id": 32000, "logprob": -11.9296875, "text": "<image>" }, { "id": 32000, "logprob": -15.296875, "text": "<image>" }, { "id": 32000, "logprob": -13.6328125, "text": "<image>" }, { "id": 32000, "logprob": -11.4296875, "text": "<image>" }, { "id": 32000, "logprob": -13.5078125, "text": "<image>" }, { "id": 32000, "logprob": -10.890625, "text": "<image>" }, { "id": 32000, "logprob": -14.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.15625, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -12.5625, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -11.734375, "text": "<image>" }, { "id": 32000, "logprob": -10.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.75, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5, 
"text": "<image>" }, { "id": 32000, "logprob": -11.578125, "text": "<image>" }, { "id": 32000, "logprob": -11.09375, "text": "<image>" }, { "id": 32000, "logprob": -12.5390625, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -19.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.4765625, "text": "<image>" }, { "id": 32000, "logprob": -11.609375, "text": "<image>" }, { "id": 32000, "logprob": -13.890625, "text": "<image>" }, { "id": 32000, "logprob": -13.3203125, "text": "<image>" }, { "id": 32000, "logprob": -12.8359375, "text": "<image>" }, { "id": 32000, "logprob": -12.0625, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -13.7265625, "text": "<image>" }, { "id": 32000, "logprob": -12.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.6171875, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.9140625, "text": "<image>" }, { "id": 32000, "logprob": -11.7109375, "text": "<image>" }, { "id": 32000, "logprob": -16.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.6953125, "text": "<image>" }, { "id": 32000, "logprob": -13.4765625, "text": "<image>" }, { "id": 32000, "logprob": -15.2421875, "text": "<image>" }, { "id": 32000, "logprob": -11.53125, "text": "<image>" }, { "id": 32000, "logprob": -14.578125, "text": "<image>" }, { "id": 32000, "logprob": -15.28125, "text": "<image>" }, { "id": 32000, "logprob": -14.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.9296875, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -12.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.296875, "text": "<image>" }, { "id": 32000, "logprob": -14.2421875, "text": "<image>" }, { "id": 32000, "logprob": -12.0703125, "text": "<image>" }, { "id": 32000, "logprob": -12.609375, "text": "<image>" }, { "id": 32000, 
"logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.265625, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -15.0546875, "text": "<image>" }, { "id": 32000, "logprob": -14.390625, "text": "<image>" }, { "id": 32000, "logprob": -11.1796875, "text": "<image>" }, { "id": 32000, "logprob": -13.1171875, "text": "<image>" }, { "id": 32000, "logprob": -11.421875, "text": "<image>" }, { "id": 32000, "logprob": -15.8203125, "text": "<image>" }, { "id": 32000, "logprob": -12.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.3125, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": "<image>" }, { "id": 32000, "logprob": -14.2578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -12.546875, "text": "<image>" }, { "id": 32000, "logprob": -16.34375, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -14.6484375, "text": "<image>" }, { "id": 32000, "logprob": -12.6015625, "text": "<image>" }, { "id": 32000, "logprob": -11.3671875, "text": "<image>" }, { "id": 32000, "logprob": -11.515625, "text": "<image>" }, { "id": 32000, "logprob": -12.7578125, "text": "<image>" }, { "id": 32000, "logprob": -11.6796875, "text": "<image>" }, { "id": 32000, "logprob": -14.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.3515625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -10.9296875, "text": "<image>" }, { "id": 32000, "logprob": -13.203125, "text": "<image>" }, { "id": 32000, "logprob": -12.2734375, "text": "<image>" }, { "id": 32000, "logprob": -12.1796875, "text": "<image>" }, { "id": 32000, "logprob": -11.953125, "text": "<image>" }, { "id": 32000, "logprob": -12.90625, "text": "<image>" }, { "id": 32000, "logprob": -12.0546875, "text": "<image>" }, { "id": 32000, "logprob": -11.4453125, "text": 
"<image>" }, { "id": 32000, "logprob": -14.9453125, "text": "<image>" }, { "id": 32000, "logprob": -13.625, "text": "<image>" }, { "id": 32000, "logprob": -11.2890625, "text": "<image>" }, { "id": 32000, "logprob": -12.9609375, "text": "<image>" }, { "id": 32000, "logprob": -11.7734375, "text": "<image>" }, { "id": 32000, "logprob": -12.5703125, "text": "<image>" }, { "id": 32000, "logprob": -12.28125, "text": "<image>" }, { "id": 32000, "logprob": -12.328125, "text": "<image>" }, { "id": 32000, "logprob": -15.1875, "text": "<image>" }, { "id": 32000, "logprob": -11.7421875, "text": "<image>" }, { "id": 32000, "logprob": -12.078125, "text": "<image>" }, { "id": 32000, "logprob": -15.0625, "text": "<image>" }, { "id": 32000, "logprob": -15.5703125, "text": "<image>" }, { "id": 32000, "logprob": -11.671875, "text": "<image>" }, { "id": 32000, "logprob": -12.3359375, "text": "<image>" }, { "id": 32000, "logprob": -14.9140625, "text": "<image>" }, { "id": 32000, "logprob": -12.6171875, "text": "<image>" }, { "id": 32000, "logprob": -14.6640625, "text": "<image>" }, { "id": 32000, "logprob": -15.34375, "text": "<image>" }, { "id": 32000, "logprob": -15.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.796875, "text": "<image>" }, { "id": 32000, "logprob": -13.703125, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -11.7890625, "text": "<image>" }, { "id": 32000, "logprob": -12.0, "text": "<image>" }, { "id": 32000, "logprob": -12.8828125, "text": "<image>" }, { "id": 32000, "logprob": -16.375, "text": "<image>" }, { "id": 32000, "logprob": -12.78125, "text": "<image>" }, { "id": 32000, "logprob": -12.4453125, "text": "<image>" }, { "id": 32000, "logprob": -13.3828125, "text": "<image>" }, { "id": 32000, "logprob": -13.171875, "text": "<image>" }, { "id": 32000, "logprob": -11.8828125, "text": "<image>" }, { "id": 32000, "logprob": -11.453125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, 
"text": "<image>" }, { "id": 32000, "logprob": -11.84375, "text": "<image>" }, { "id": 32000, "logprob": -11.2734375, "text": "<image>" }, { "id": 32000, "logprob": -14.71875, "text": "<image>" }, { "id": 32000, "logprob": -14.265625, "text": "<image>" }, { "id": 32000, "logprob": -12.0390625, "text": "<image>" }, { "id": 32000, "logprob": -13.6015625, "text": "<image>" }, { "id": 32000, "logprob": -13.6484375, "text": "<image>" }, { "id": 32000, "logprob": -11.3046875, "text": "<image>" }, { "id": 32000, "logprob": -11.96875, "text": "<image>" }, { "id": 32000, "logprob": -13.3984375, "text": "<image>" }, { "id": 32000, "logprob": -12.921875, "text": "<image>" }, { "id": 32000, "logprob": -14.2109375, "text": "<image>" }, { "id": 32000, "logprob": -12.875, "text": "<image>" }, { "id": 32000, "logprob": -13.875, "text": "<image>" }, { "id": 32000, "logprob": -13.359375, "text": "<image>" }, { "id": 32000, "logprob": -13.2109375, "text": "<image>" }, { "id": 32000, "logprob": -11.984375, "text": "<image>" }, { "id": 32000, "logprob": -15.140625, "text": "<image>" }, { "id": 32000, "logprob": -12.015625, "text": "<image>" }, { "id": 32000, "logprob": -11.03125, "text": "<image>" }, { "id": 32000, "logprob": -14.40625, "text": "<image>" }, { "id": 32000, "logprob": -12.046875, "text": "<image>" }, { "id": 32000, "logprob": -13.7734375, "text": "<image>" }, { "id": 32000, "logprob": -13.5546875, "text": "<image>" }, { "id": 32000, "logprob": -11.7265625, "text": "<image>" }, { "id": 32000, "logprob": -13.390625, "text": "<image>" }, { "id": 32000, "logprob": -16.84375, "text": "<image>" }, { "id": 32000, "logprob": -12.5859375, "text": "<image>" }, { "id": 32000, "logprob": -17.640625, "text": "<image>" }, { "id": 32000, "logprob": -11.9453125, "text": "<image>" }, { "id": 32000, "logprob": -12.71875, "text": "<image>" }, { "id": 32000, "logprob": -16.0, "text": "<image>" }, { "id": 32000, "logprob": -16.625, "text": "<image>" }, { "id": 32000, "logprob": -12.4921875, 
"text": "<image>" }, { "id": 2418, "logprob": -19.0625, "text": "Can" }, { "id": 368, "logprob": -0.19604492, "text": "you" }, { "id": 1912, "logprob": -1.5058594, "text": "tell" }, { "id": 528, "logprob": -0.31030273, "text": "me" }, { "id": 264, "logprob": -2.6367188, "text": "a" }, { "id": 1215, "logprob": -9.1015625, "text": "very" }, { "id": 2485, "logprob": -0.9975586, "text": "short" }, { "id": 2838, "logprob": -0.4633789, "text": "story" }, { "id": 2818, "logprob": -3.3144531, "text": "based" }, { "id": 356, "logprob": -0.029037476, "text": "on" }, { "id": 272, "logprob": -0.9902344, "text": "the" }, { "id": 3469, "logprob": -0.2890625, "text": "image" }, { "id": 28804, "logprob": -0.42895508, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -0.007621765, "special": false, "text": "\n" }, { "id": 13, "logprob": -0.20812988, "special": false, "text": "\n" }, { "id": 16114, "logprob": -1.2587891, "special": false, "text": "Once" }, { "id": 3714, "logprob": -0.20825195, "special": false, "text": " upon" }, { "id": 264, "logprob": -0.0017709732, "special": false, "text": " a" }, { "id": 727, "logprob": -0.011932373, "special": false, "text": " time" }, { "id": 28725, "logprob": -0.17297363, "special": false, "text": "," }, { "id": 736, "logprob": -0.9057617, "special": false, "text": " there" }, { "id": 403, "logprob": -0.05758667, "special": false, "text": " was" }, { "id": 264, "logprob": -0.00970459, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n\nOnce upon a time, there was a" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_load.json", "repo_id": "text-generation-inference", "token_count": 848284 }
246
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.0234375, "text": "What" }, { "id": 310, "logprob": -5.4179688, "text": " is" }, { "id": 247, "logprob": -2.1542969, "text": " a" }, { "id": 1167, "logprob": -5.359375, "text": " mem" }, { "id": 70, "logprob": -0.006038666, "text": "e" }, { "id": 13, "logprob": -7.328125, "text": "," }, { "id": 285, "logprob": -0.3173828, "text": " and" }, { "id": 752, "logprob": -2.0625, "text": " what" }, { "id": 434, "logprob": -5.7734375, "text": "'s" }, { "id": 253, "logprob": -0.74072266, "text": " the" }, { "id": 2892, "logprob": -6.5898438, "text": " history" }, { "id": 3212, "logprob": -2.2949219, "text": " behind" }, { "id": 436, "logprob": -11.40625, "text": " this" }, { "id": 3159, "logprob": -2.1113281, "text": " word" }, { "id": 32, "logprob": -0.008056641, "text": "?" }, { "id": 0, "logprob": -2.3300781, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.28125, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.5878906, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5449219, "special": false, "text": " word" }, { "id": 346, "logprob": -0.05038452, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002292633, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3828278e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0010242462, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.090270996, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12719727, "special": false, "text": " first" }, { "id": 908, "logprob": -0.016571045, "special": false, "text": " used" }, { "id": 275, "logprob": -0.43432617, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_neox_sharded/test_neox.json", "repo_id": "text-generation-inference", "token_count": 1966 }
247
import pytest


@pytest.fixture(scope="module")
def flash_llama_awq_handle(launcher):
    """Launch a single-shard, AWQ-quantized CodeLlama server for this module."""
    with launcher(
        "abhinavkulkarni/codellama-CodeLlama-7b-Python-hf-w4-g128-awq",
        num_shard=1,
        quantize="awq",
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_llama_awq(flash_llama_awq_handle):
    """Wait (up to 300s) for the server to report healthy, then expose its client."""
    await flash_llama_awq_handle.health(300)
    return flash_llama_awq_handle.client


@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_llama_awq(flash_llama_awq, response_snapshot):
    """Greedy generation yields the expected continuation and matches the snapshot."""
    result = await flash_llama_awq.generate(
        "What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
    )

    expected = "\nWhat is the difference between Deep Learning and Machine"
    assert result.details.generated_tokens == 10
    assert result.generated_text == expected
    assert result == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_llama_awq_all_params(flash_llama_awq, response_snapshot):
    """Seeded generation with every sampling knob enabled matches the snapshot."""
    result = await flash_llama_awq.generate(
        "What is Deep Learning?",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert result.details.generated_tokens == 10
    assert result == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_llama_awq_load(flash_llama_awq, generate_load, response_snapshot):
    """Four concurrent identical requests all produce the expected continuation."""
    results = await generate_load(
        flash_llama_awq, "What is Deep Learning?", max_new_tokens=10, n=4
    )

    expected = "\nWhat is the difference between Deep Learning and Machine"
    assert len(results) == 4
    assert all(r.generated_text == expected for r in results)
    assert results == response_snapshot
text-generation-inference/integration-tests/models/test_flash_awq.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_awq.py", "repo_id": "text-generation-inference", "token_count": 866 }
248
import pytest


@pytest.fixture(scope="module")
def flash_mistral_handle(launcher):
    """Launch a Mistral-7B-Instruct server for the duration of the module."""
    with launcher("mistralai/Mistral-7B-Instruct-v0.1") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_mistral(flash_mistral_handle):
    """Block (up to 300s) until the server reports healthy, then hand out its client."""
    await flash_mistral_handle.health(300)
    return flash_mistral_handle.client


@pytest.mark.asyncio
async def test_flash_mistral(flash_mistral, response_snapshot):
    """Plain greedy generation yields the expected text and matches the snapshot."""
    result = await flash_mistral.generate(
        "Test request", max_new_tokens=10, decoder_input_details=True
    )

    assert result.details.generated_tokens == 10
    assert result.generated_text == ": Let n = 10 - 1"
    assert result == response_snapshot


@pytest.mark.asyncio
async def test_flash_mistral_all_params(flash_mistral, response_snapshot):
    """Seeded generation with every sampling parameter set matches the snapshot."""
    result = await flash_mistral.generate(
        "Test request",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert result.details.generated_tokens == 10
    assert result == response_snapshot


@pytest.mark.asyncio
async def test_flash_mistral_load(flash_mistral, generate_load, response_snapshot):
    """Four concurrent identical requests all return the same deterministic text."""
    results = await generate_load(flash_mistral, "Test request", max_new_tokens=10, n=4)

    assert len(results) == 4
    assert all(
        r.generated_text == results[0].generated_text for r in results
    ), f"{[r.generated_text for r in results]}"
    assert results[0].generated_text == ": Let n = 10 - 1"
    assert results == response_snapshot
text-generation-inference/integration-tests/models/test_flash_mistral.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_mistral.py", "repo_id": "text-generation-inference", "token_count": 714 }
249
import pytest


@pytest.fixture(scope="module")
def fused_kernel_mamba_handle(launcher):
    """Launch a single-shard Mamba-130m server for the duration of the module."""
    with launcher("state-spaces/mamba-130m", num_shard=1) as handle:
        yield handle


@pytest.fixture(scope="module")
async def fused_kernel_mamba(fused_kernel_mamba_handle):
    """Wait (up to 300s) for the server to report healthy, then return its client."""
    await fused_kernel_mamba_handle.health(300)
    return fused_kernel_mamba_handle.client


@pytest.mark.release
@pytest.mark.asyncio
async def test_mamba(fused_kernel_mamba, response_snapshot):
    """Greedy generation yields the expected continuation and matches the snapshot."""
    response = await fused_kernel_mamba.generate(
        "What is Deep Learning?", max_new_tokens=10
    )

    assert response.details.generated_tokens == 10
    assert response.generated_text == "\n\nDeep learning is a new type of machine"
    assert response == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_mamba_all_params(fused_kernel_mamba, response_snapshot):
    """Seeded generation with every sampling parameter set matches the snapshot."""
    response = await fused_kernel_mamba.generate(
        "blue, red, yellow, ",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert (
        response.generated_text
        == "blue, red, yellow, \nand blue colors. A number of different color"
    )
    assert response == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
async def test_mamba_load(
    fused_kernel_mamba, generate_load, generous_response_snapshot
):
    """Four concurrent identical requests all return the expected continuation."""
    responses = await generate_load(
        fused_kernel_mamba, "What is Deep Learning?", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    # Fix: the original asserted the same expected string twice (before and
    # after the all-equal check); asserting it once is sufficient.
    assert responses[0].generated_text == "\n\nDeep learning is a new type of machine"
    assert all(r.generated_text == responses[0].generated_text for r in responses)
    assert responses == generous_response_snapshot
text-generation-inference/integration-tests/models/test_mamba.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_mamba.py", "repo_id": "text-generation-inference", "token_count": 822 }
250
# Download the cleaned ShareGPT conversation dump from the Hugging Face Hub.
ShareGPT_V3_unfiltered_cleaned_split.json:
	wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json

# Filter the downloaded ShareGPT dump into the format used by the load tests
# (depends on the raw JSON file above being present).
prepare_share: ShareGPT_V3_unfiltered_cleaned_split.json
	python filter.py

# Build the Orca-based load-test dataset.
prepare_orca:
	python orca.py
text-generation-inference/load_tests/Makefile/0
{ "file_path": "text-generation-inference/load_tests/Makefile", "repo_id": "text-generation-inference", "token_count": 123 }
251
//! Request queue for the v2 scheduler: entries wait here until the batching
//! task asks for the next batch that fits its token budgets.
use crate::infer::{InferError, InferStreamResponse};
use crate::validation::{
    ValidGenerateRequest, ValidGrammar, ValidParameters, ValidStoppingParameters,
};
use nohash_hasher::{BuildNoHashHasher, IntMap};
use std::cmp::min;
use std::collections::VecDeque;
use text_generation_client::v2::{
    Batch, GrammarType, NextTokenChooserParameters, Request, StoppingCriteriaParameters,
};
use text_generation_client::ChunksToString;
use tokio::sync::{mpsc, oneshot};
use tokio::time::Instant;
use tracing::{info_span, instrument, Span};

/// Queue entry
#[derive(Debug)]
pub(crate) struct Entry {
    /// Request
    pub request: ValidGenerateRequest,
    /// Response sender to communicate between the Infer struct and the batching_task
    pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>,
    /// Span that will live as long as entry
    pub span: Span,
    /// Temporary span used as a guard when logging inference, wait times...
    pub temp_span: Option<Span>,
    /// Instant when this entry was queued
    pub queue_time: Instant,
    /// Instant when this entry was added to a batch
    pub batch_time: Option<Instant>,
}

/// Request Queue
#[derive(Debug, Clone)]
pub(crate) struct Queue {
    /// Channel to communicate with the background queue task
    queue_sender: mpsc::UnboundedSender<QueueCommand>,
}

impl Queue {
    /// Create a queue handle and spawn the background task that owns the state.
    pub(crate) fn new(
        requires_padding: bool,
        block_size: u32,
        window_size: Option<u32>,
        speculate: u32,
    ) -> Self {
        // Create channel
        let (queue_sender, queue_receiver) = mpsc::unbounded_channel();

        // Launch background queue task
        tokio::spawn(queue_task(
            requires_padding,
            block_size,
            window_size,
            speculate,
            queue_receiver,
        ));

        Self { queue_sender }
    }

    /// Enqueue one request entry (fire-and-forget to the background task).
    #[instrument(skip_all)]
    pub(crate) fn append(&self, entry: Entry) {
        // Send append command to the background task managing the state
        // Unwrap is safe here
        self.queue_sender
            .send(QueueCommand::Append(Box::new(entry), Span::current()))
            .unwrap();
    }

    // Get the next batch
    #[instrument(skip(self))]
    pub(crate) async fn next_batch(
        &self,
        min_size: Option<usize>,
        max_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
    ) -> Option<NextBatch> {
        // Create response channel
        let (response_sender, response_receiver) = oneshot::channel();
        // Send next batch command to the background task managing the state
        // Unwrap is safe here
        self.queue_sender
            .send(QueueCommand::NextBatch {
                min_size,
                max_size,
                prefill_token_budget,
                token_budget,
                response_sender,
                span: Span::current(),
            })
            .unwrap();
        // Await on response channel
        // Unwrap is safe here
        response_receiver.await.unwrap()
    }
}

// Background task responsible of the queue state
async fn queue_task(
    requires_padding: bool,
    block_size: u32,
    window_size: Option<u32>,
    speculate: u32,
    mut receiver: mpsc::UnboundedReceiver<QueueCommand>,
) {
    let mut state = State::new(requires_padding, block_size, window_size, speculate);

    // Loop until every Queue handle is dropped (recv returns None).
    while let Some(cmd) = receiver.recv().await {
        match cmd {
            QueueCommand::Append(entry, span) => {
                span.in_scope(|| state.append(*entry));
                metrics::gauge!("tgi_queue_size").increment(1.0);
            }
            QueueCommand::NextBatch {
                min_size,
                max_size,
                prefill_token_budget,
                token_budget,
                response_sender,
                span,
            } => span.in_scope(|| {
                let next_batch =
                    state.next_batch(min_size, max_size, prefill_token_budget, token_budget);
                response_sender.send(next_batch).unwrap();
                metrics::gauge!("tgi_queue_size").set(state.entries.len() as f64);
            }),
        }
    }
}

/// Queue State
#[derive(Debug)]
struct State {
    /// Queue entries organized in a Vec
    entries: VecDeque<(u64, Entry)>,
    /// Id of the next entry
    next_id: u64,
    /// Id of the next batch
    next_batch_id: u64,
    /// Whether the model is using padding
    requires_padding: bool,
    /// Paged Attention block size
    block_size: u32,
    /// Sliding window
    window_size: Option<u32>,
    /// Speculation amount
    speculate: u32,
}

impl State {
    fn new(
        requires_padding: bool,
        block_size: u32,
        window_size: Option<u32>,
        speculate: u32,
    ) -> Self {
        Self {
            entries: VecDeque::with_capacity(128),
            next_id: 0,
            next_batch_id: 0,
            requires_padding,
            block_size,
            window_size,
            speculate,
        }
    }

    /// Append an entry to the queue
    fn append(&mut self, mut entry: Entry) {
        // Create a span that will live as long as the entry is in the queue waiting to be batched
        let queue_span = info_span!(parent: &entry.span, "queued");
        entry.temp_span = Some(queue_span);

        // Push entry in the queue
        self.entries.push_back((self.next_id, entry));
        self.next_id += 1;
    }

    // Get the next batch.
    // Pops entries from the front while they fit the prefill/decode token
    // budgets; returns None if nothing fits or min_size cannot be met.
    fn next_batch(
        &mut self,
        min_size: Option<usize>,
        max_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
    ) -> Option<NextBatch> {
        if self.entries.is_empty() {
            tracing::debug!("No queue");
            return None;
        }

        // Check if we have enough entries
        if let Some(min_size) = min_size {
            if self.entries.len() < min_size {
                tracing::debug!("Not enough entries");
                return None;
            }
        }

        if let Some(max_size) = max_size {
            if max_size == 0 {
                tracing::debug!("No capacity");
                return None;
            }
        }

        // Pad prefill_token_budget to be a multiple of block size
        let prefill_token_budget =
            ((prefill_token_budget + self.block_size - 1) / self.block_size) * self.block_size;

        // Create span for this batch to add context to inference calls
        let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty);
        next_batch_span.follows_from(&Span::current());

        let mut batch_requests = Vec::with_capacity(self.entries.len());
        let mut batch_entries =
            IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default());

        let mut max_input_length = 0;
        let mut prefill_tokens: u32 = 0;
        let mut decode_tokens: u32 = 0;

        // Pop entries starting from the front of the queue
        while let Some((id, mut entry)) = self.entries.pop_front() {
            // Filter entries where the response receiver was dropped (== entries where the request
            // was dropped by the client)
            if entry.response_tx.is_closed() {
                metrics::counter!("tgi_request_failure", "err" => "dropped").increment(1);
                tracing::debug!("Dropping entry");
                continue;
            }

            if self.requires_padding {
                // We pad to max input length in the Python shards
                // We need to take these padding tokens into the equation
                max_input_length = max_input_length.max(entry.request.input_length);
                prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length
            } else {
                // pad to block size
                prefill_tokens += ((entry.request.input_length + self.block_size - 1)
                    / self.block_size)
                    * self.block_size;
            }

            if self.requires_padding {
                decode_tokens += entry.request.stopping_parameters.max_new_tokens;
            } else {
                let max_new_tokens = match self.window_size {
                    None => entry.request.stopping_parameters.max_new_tokens,
                    Some(window_size) => min(
                        window_size.saturating_sub(entry.request.input_length),
                        entry.request.stopping_parameters.max_new_tokens,
                    ),
                };

                // pad to block size
                decode_tokens +=
                    ((max_new_tokens + self.block_size - 1) / self.block_size) * self.block_size;
            }

            if prefill_tokens > prefill_token_budget
                || (prefill_tokens + decode_tokens + self.speculate) > token_budget
            {
                // Entry is over budget
                // Add it back to the front
                tracing::debug!("Over budget: prefill_tokens={prefill_tokens} > {prefill_token_budget} || {prefill_tokens} + {decode_tokens} + {} > {token_budget}", self.speculate);
                self.entries.push_front((id, entry));
                break;
            }

            tracing::debug!("Accepting entry");
            // Create a new span to link the batch back to this entry
            let entry_batch_span = info_span!(parent: &entry.span, "infer");
            // Add relationships
            next_batch_span.follows_from(&entry_batch_span);
            entry_batch_span.follows_from(&next_batch_span);
            // Update entry
            entry.temp_span = Some(entry_batch_span);

            batch_requests.push(Request {
                id,
                prefill_logprobs: entry.request.decoder_input_details,
                inputs: entry.request.inputs.chunks_to_string(),
                truncate: entry.request.truncate,
                parameters: Some(NextTokenChooserParameters::from(
                    entry.request.parameters.clone(),
                )),
                stopping_parameters: Some(StoppingCriteriaParameters::from(
                    entry.request.stopping_parameters.clone(),
                )),
                top_n_tokens: entry.request.top_n_tokens,
            });
            // Set batch_time
            entry.batch_time = Some(Instant::now());
            // Insert in batch_entries IntMap
            batch_entries.insert(id, entry);

            // Check if max_size
            if Some(batch_requests.len()) == max_size {
                break;
            }
        }

        // Empty batch
        if batch_requests.is_empty() {
            tracing::debug!("Filtered out all entries");
            return None;
        }

        // Check if our batch is big enough
        if let Some(min_size) = min_size {
            // Batch is too small
            if batch_requests.len() < min_size {
                // Add back entries to the queue in the correct order
                for r in batch_requests.into_iter().rev() {
                    let id = r.id;
                    let entry = batch_entries.remove(&id).unwrap();
                    self.entries.push_front((id, entry));
                }

                return None;
            }
        }

        // Final batch size
        let size = batch_requests.len() as u32;
        next_batch_span.record("batch_size", size);

        let batch = Batch {
            id: self.next_batch_id,
            requests: batch_requests,
            size,
            max_tokens: (prefill_tokens + decode_tokens),
        };
        // Increment batch id
        self.next_batch_id += 1;

        metrics::histogram!("tgi_batch_next_size").record(batch.size as f64);

        Some((batch_entries, batch, next_batch_span))
    }
}

/// Result of a successful `next_batch`: the popped entries keyed by id, the
/// protobuf batch, and the tracing span linking the batch to its entries.
type NextBatch = (IntMap<u64, Entry>, Batch, Span);

/// Messages understood by the background queue task.
#[derive(Debug)]
enum QueueCommand {
    Append(Box<Entry>, Span),
    NextBatch {
        min_size: Option<usize>,
        max_size: Option<usize>,
        prefill_token_budget: u32,
        token_budget: u32,
        response_sender: oneshot::Sender<Option<NextBatch>>,
        span: Span,
    },
}

impl From<ValidParameters> for NextTokenChooserParameters {
    fn from(value: ValidParameters) -> Self {
        // The protobuf encodes "no grammar" as an empty string + GrammarType::None.
        let (grammar, grammar_type) = match value.grammar {
            None => (String::new(), GrammarType::None),
            Some(grammar) => match grammar {
                ValidGrammar::Json(grammar_string) => (grammar_string, GrammarType::Json),
                ValidGrammar::Regex(grammar_string) => (grammar_string, GrammarType::Regex),
            },
        };

        Self {
            temperature: value.temperature,
            top_k: value.top_k,
            top_p: value.top_p,
            typical_p: value.typical_p,
            do_sample: value.do_sample,
            seed: value.seed,
            repetition_penalty: value.repetition_penalty,
            frequency_penalty: value.frequency_penalty,
            watermark: value.watermark,
            grammar,
            grammar_type: grammar_type.into(),
        }
    }
}

impl From<ValidStoppingParameters> for StoppingCriteriaParameters {
    fn from(value: ValidStoppingParameters) -> Self {
        Self {
            max_new_tokens: value.max_new_tokens,
            stop_sequences: value.stop_sequences,
            ignore_eos_token: value.ignore_eos_token,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tracing::info_span;

    // Build a minimal Entry plus the receiving half of its response channel
    // (kept alive by the caller so the entry is not filtered as "dropped").
    fn default_entry() -> (
        Entry,
        mpsc::UnboundedReceiver<Result<InferStreamResponse, InferError>>,
    ) {
        let (response_tx, receiver_tx) = mpsc::unbounded_channel();

        let entry = Entry {
            request: ValidGenerateRequest {
                inputs: vec![],
                input_length: 0,
                truncate: 0,
                decoder_input_details: false,
                parameters: ValidParameters {
                    temperature: 0.0,
                    top_k: 0,
                    top_p: 0.0,
                    typical_p: 0.0,
                    do_sample: false,
                    seed: 0,
                    repetition_penalty: 0.0,
                    frequency_penalty: 0.0,
                    watermark: false,
                    grammar: None,
                },
                stopping_parameters: ValidStoppingParameters {
                    ignore_eos_token: false,
                    max_new_tokens: 1,
                    stop_sequences: vec![],
                },
                top_n_tokens: 0,
                adapter_id: None,
            },
            response_tx,
            span: info_span!("entry"),
            temp_span: None,
            queue_time: Instant::now(),
            batch_time: None,
        };
        (entry, receiver_tx)
    }

    #[test]
    fn test_append() {
        let mut state = State::new(false, 1, None, 0);
        let (entry, _guard) = default_entry();

        assert_eq!(state.next_id, 0);
        assert_eq!(state.entries.len(), 0);

        state.append(entry);

        assert_eq!(state.next_id, 1);
        assert_eq!(state.entries.len(), 1);
        let (id, _) = state.entries.remove(0).unwrap();
        assert_eq!(id, 0);
    }

    #[test]
    fn test_next_batch_empty() {
        let mut state = State::new(false, 1, None, 0);

        assert!(state.next_batch(None, None, 1, 1).is_none());
        assert!(state.next_batch(Some(1), None, 1, 1).is_none());
    }

    #[test]
    fn test_next_batch_min_size() {
        let mut state = State::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, None, 2, 2).unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&0));
        assert!(entries.contains_key(&1));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert!(entries.get(&1).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 2);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 0);
        assert_eq!(state.next_batch_id, 1);

        let (entry3, _guard3) = default_entry();
        state.append(entry3);

        assert!(state.next_batch(Some(2), None, 2, 2).is_none());

        assert_eq!(state.next_id, 3);
        assert_eq!(state.entries.len(), 1);
        let (id, _) = state.entries.remove(0).unwrap();
        assert_eq!(id, 2);
    }

    #[test]
    fn test_next_batch_max_size() {
        let mut state = State::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, Some(1), 2, 2).unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 1);
        assert_eq!(state.next_batch_id, 1);
    }

    #[test]
    fn test_next_batch_token_budget() {
        let mut state = State::new(false, 1, None, 0);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, None, 1, 1).unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 1);
        assert_eq!(state.next_batch_id, 1);

        let (entry3, _guard3) = default_entry();
        state.append(entry3);

        let (entries, batch, _) = state.next_batch(None, None, 3, 3).unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&1));
        assert!(entries.contains_key(&2));
        assert_eq!(batch.id, 1);
        assert_eq!(batch.size, 2);

        assert_eq!(state.next_id, 3);
        assert_eq!(state.entries.len(), 0);
        assert_eq!(state.next_batch_id, 2);
    }

    #[tokio::test]
async fn test_queue_append() { let queue = Queue::new(false, 1, None, 0); let (entry, _guard) = default_entry(); queue.append(entry); } #[tokio::test] async fn test_queue_next_batch_empty() { let queue = Queue::new(false, 1, None, 0); assert!(queue.next_batch(None, None, 1, 1).await.is_none()); assert!(queue.next_batch(Some(1), None, 1, 1).await.is_none()); } #[tokio::test] async fn test_queue_next_batch_min_size() { let queue = Queue::new(false, 1, None, 0); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, None, 2, 2).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert!(entries.get(&1).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); let (entry3, _guard3) = default_entry(); queue.append(entry3); // Not enough requests pending assert!(queue.next_batch(Some(2), None, 2, 2).await.is_none()); // Not enough token budget assert!(queue.next_batch(Some(1), None, 0, 0).await.is_none()); // Ok let (entries2, batch2, _) = queue.next_batch(Some(1), None, 2, 2).await.unwrap(); assert_eq!(entries2.len(), 1); assert!(entries2.contains_key(&2)); assert!(entries2.get(&2).unwrap().batch_time.is_some()); assert_eq!(batch2.id, 1); assert_eq!(batch2.size, 1); } #[tokio::test] async fn test_queue_next_batch_max_size() { let queue = Queue::new(false, 1, None, 0); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, Some(1), 2, 2).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert!(entries.get(&0).unwrap().batch_time.is_some()); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); } #[tokio::test] async fn test_queue_next_batch_token_budget() { let 
queue = Queue::new(false, 1, None, 0); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); let (entries, batch, _) = queue.next_batch(None, None, 1, 1).await.unwrap(); assert_eq!(entries.len(), 1); assert!(entries.contains_key(&0)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 1); let (entry3, _guard3) = default_entry(); queue.append(entry3); let (entries, batch, _) = queue.next_batch(None, None, 3, 3).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&1)); assert!(entries.contains_key(&2)); assert_eq!(batch.id, 1); assert_eq!(batch.size, 2); } #[tokio::test] async fn test_queue_next_batch_token_speculate() { let queue = Queue::new(false, 1, None, 2); let (entry1, _guard1) = default_entry(); let (entry2, _guard2) = default_entry(); queue.append(entry1); queue.append(entry2); // Budget of 1 is not enough assert!(queue.next_batch(None, None, 1, 1).await.is_none()); let (entries, batch, _) = queue.next_batch(None, None, 6, 6).await.unwrap(); assert_eq!(entries.len(), 2); assert!(entries.contains_key(&0)); assert!(entries.contains_key(&1)); assert_eq!(batch.id, 0); assert_eq!(batch.size, 2); } #[tokio::test] async fn test_queue_next_batch_dropped_receiver() { let queue = Queue::new(false, 1, None, 0); let (entry, _) = default_entry(); queue.append(entry); assert!(queue.next_batch(None, None, 1, 1).await.is_none()); } }
text-generation-inference/router/src/infer/v2/queue.rs/0
{ "file_path": "text-generation-inference/router/src/infer/v2/queue.rs", "repo_id": "text-generation-inference", "token_count": 11013 }
252
# Pinned FBGEMM revision; bump deliberately when upgrading the GenAI kernels.
fbgemm_commit := v0.8.0

# Clone FBGEMM at the pinned commit and build the fbgemm_gpu "genai" package
# variant for SM80 (A100) and SM90a (H100) only — see CUDA_ARCH_LIST /
# NVCC_GENCODE below. NOTE(review): this target both builds AND installs
# ("build install"); the install-fbgemm target below re-runs "install" alone.
build-fbgemm:
	git clone https://github.com/pytorch/FBGEMM.git fbgemm && \
	cd fbgemm && git fetch && git checkout $(fbgemm_commit) && \
	git submodule update --init --recursive && \
	cd fbgemm_gpu && \
	pip install -r requirements.txt && \
	CUDA_ARCH_LIST="8.0;9.0a" NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_90a,code=sm_90a" TORCH_CUDA_ARCH_LIST="8.0;9.0a" python setup.py --package_variant genai build

# Install the previously built fbgemm_gpu genai package into the active
# Python environment. Depends on build-fbgemm having produced the build tree.
install-fbgemm: build-fbgemm
	cd fbgemm/fbgemm_gpu && \
	CUDA_ARCH_LIST="8.0;9.0a" NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_90a,code=sm_90a" TORCH_CUDA_ARCH_LIST="8.0;9.0a" python setup.py --package_variant genai install
text-generation-inference/server/Makefile-fbgemm/0
{ "file_path": "text-generation-inference/server/Makefile-fbgemm", "repo_id": "text-generation-inference", "token_count": 317 }
253
// 4-bit quantized matmul (ExLlama kernels): out = x @ dequant(w).
// Two paths are provided: a fused kernel that dequantizes on the fly
// (q4_matmul_cuda) and a reconstruct-then-cuBLAS path (q4_matmul_recons_cuda).

#include "q4_matmul.cuh"
#include "column_remap.cuh"
#include <ATen/cuda/CUDAContext.h>
#include "../util.cuh"
#include "../matrix.cuh"
#include "../cu_compat.cuh"
#include "../cuda_buffers.cuh"

#if defined(USE_ROCM)
#include "../hip_compat.cuh"
#endif

const int THREADS_X = 32;       // Block size and thread count along columns in w and out
const int THREADS_Y = 1;        // Block size and thread count along rows in x and out

// Signature shared by all template instantiations of q4_matmul_kernel, so the
// host-side picker can return any of the eight variants through one pointer.
typedef void (*fp_q4_matmul_kernel)
(
    const half*,        // x
    const uint32_t*,    // w (packed 4-bit)
    half*,              // out
    const half*,        // w_scales
    const uint32_t*,    // w_zeros (packed 4-bit)
    const int,          // height
    const int,          // dim
    const int,          // width
    const int,          // groupsize
    const int,          // block_size_z
    const uint32_t*,    // x_map (may be NULL)
    bool                // no_zero
);

// Fused dequantize + matmul kernel. Each thread accumulates one (x_row,
// w_column) partial dot product over a block_size_z-wide slice of the shared
// dimension; slices are combined across blockIdx.z via atomicAdd on out.
// Template switches:
//   use_half2     - accumulate in half2 (two lanes) vs. scalar half
//   use_groupsize - k-loop strides by quantization group (requires the slice
//                   to start on a group boundary)
//   use_x_map     - gather x columns through x_map instead of reading directly
template<bool use_half2, bool use_groupsize, bool use_x_map>
__global__ void q4_matmul_kernel
(
    const half* __restrict__ x,
    const uint32_t* __restrict__ w,
    half* __restrict__ out,
    const half* __restrict__ w_scales,
    const uint32_t* __restrict__ w_zeros,
    const int height,
    const int dim,
    const int width,
    const int groupsize,
    const int block_size_z,
    const uint32_t* __restrict__ x_map,
    bool no_zero
)
{
    // Start of block
    int x_column = block_size_z * blockIdx.z;
    int x_column_end = min(dim, block_size_z * (blockIdx.z + 1));

    int w_column = THREADS_X * blockIdx.x + threadIdx.x;
    int x_row = THREADS_Y * blockIdx.y + threadIdx.y;

    int iterations = (x_column_end - x_column) / 8;

    // Views
    MatrixView_half x_(x, height, dim);
    MatrixView_half w_scales_(w_scales, dim / groupsize, width);
    MatrixView_q4_row w_zeros_(w_zeros, dim / groupsize, width);
    MatrixView_q4_column w_(w, dim, width);
    MatrixView_half_rw out_(out, height, width);

    // Zero output
    // Done by the first z-slice only; each even-indexed thread clears two
    // adjacent half values at once via a 32-bit store.
    if (!no_zero && blockIdx.z == 0 && (threadIdx.x & 1) == 0)
    {
        *((uint32_t*) out_.item_ptr(x_row, w_column)) = 0;
        __syncthreads();
    }

    // Loop over part of x row (and w column)
    half2 acc = {};
    half acc_h = {};

    if constexpr (use_groupsize)
    {
        // For quant matrices where groupsize divides BLOCK_SIZE_Z we always start on a group boundary, so this
        // could be slightly faster
        for (int k = x_column, group = x_column / groupsize; k < x_column + iterations * 8; group++, k += groupsize)
        {
            if constexpr (use_half2)
            {
                half2 w_scale = w_scales_.item_half2half2(group, w_column);
                // +1 then mask: stored zero-points are offset by one
                uint32_t w_zero = (w_zeros_.item(group, w_column) + 1) & 0x0F;

                if constexpr (use_x_map) acc = dot_product_8_x_map(acc, x_, x_row, k, w_, k, w_column, w_scale, w_zero, groupsize / 8, x_map);
                else                     acc = dot_product_8      (acc, x_, x_row, k, w_, k, w_column, w_scale, w_zero, groupsize / 8);
            }
            else
            {
                half w_scale = w_scales_.item(group, w_column);
                uint32_t w_zero = (w_zeros_.item(group, w_column) + 1) & 0x0F;

                if constexpr (use_x_map) acc_h = dot_product_8_x_map_h(acc_h, x_, x_row, k, w_, k, w_column, w_scale, w_zero, groupsize / 8, x_map);
                else                     acc_h = dot_product_8_h      (acc_h, x_, x_row, k, w_, k, w_column, w_scale, w_zero, groupsize / 8);
            }
        }
    }
    else
    {
        // Otherwise assume groupsize is a multiple of 8, do 8 columns per iteration and trust the cache
        for (int k = x_column; k < x_column + iterations * 8; k += 8)
        {
            if constexpr (use_half2)
            {
                int group = k / groupsize;
                half2 w_scale = w_scales_.item_half2half2(group, w_column);
                uint32_t w_zero = (w_zeros_.item(group, w_column) + 1) & 0x0F;

                if constexpr (use_x_map) acc = dot_product_8_x_map(acc, x_, x_row, k, w_, k, w_column, w_scale, w_zero, 1, x_map);
                else                     acc = dot_product_8      (acc, x_, x_row, k, w_, k, w_column, w_scale, w_zero, 1);
            }
            else
            {
                int group = k / groupsize;
                half w_scale = w_scales_.item(group, w_column);
                uint32_t w_zero = (w_zeros_.item(group, w_column) + 1) & 0x0F;

                if constexpr (use_x_map) acc_h = dot_product_8_x_map_h(acc_h, x_, x_row, k, w_, k, w_column, w_scale, w_zero, 1, x_map);
                else                     acc_h = dot_product_8_h      (acc_h, x_, x_row, k, w_, k, w_column, w_scale, w_zero, 1);
            }
        }
    }

    // Add to block result
    // atomicAdd because multiple z-blocks accumulate into the same output cell.
    if constexpr (use_half2)
    {
        half result = __hadd(__low2half(acc), __high2half(acc));
        atomicAdd(out_.item_ptr(x_row, w_column), result);
    }
    else
    {
        atomicAdd(out_.item_ptr(x_row, w_column), acc_h);
    }
}

// Select the kernel instantiation matching the tuning flags:
// half2 accumulation, group-stride loop, and optional x_map gather.
fp_q4_matmul_kernel q4_matmul_kernel_pick(ExLlamaTuning* tuningParams, int block_size_z, int groupsize, uint32_t* x_map)
{
    // <bool use_half2, bool use_groupsize, bool use_x_map>
    if (tuningParams->matmul_no_half2)
    {
        if (block_size_z % groupsize == 0)
        {
            if (x_map) return q4_matmul_kernel<false, true, true >;
            else       return q4_matmul_kernel<false, true, false>;
        }
        else
        {
            if (x_map) return q4_matmul_kernel<false, false, true >;
            else       return q4_matmul_kernel<false, false, false>;
        }
    }
    else
    {
        if (block_size_z % groupsize == 0)
        {
            if (x_map) return q4_matmul_kernel<true, true, true >;
            else       return q4_matmul_kernel<true, true, false>;
        }
        else
        {
            if (x_map) return q4_matmul_kernel<true, false, true >;
            else       return q4_matmul_kernel<true, false, false>;
        }
    }
};

// Compute y = x @ w
// Host launcher for the fused kernel. Optionally pre-permutes x columns on
// the host-chosen stream (column_remap_cuda) unless fused remapping is
// requested or an alternate stream is in use, in which case the kernel does
// the gather itself via x_map.
void q4_matmul_cuda
(
    ExLlamaTuning* tuningParams,
    const half* x,
    const int x_height,
    const Q4Matrix* w,
    half* out,
    bool no_zero,
    cudaStream_t alt_stream
)
{
    int height = x_height;
    int dim = w->height;
    int width = w->width;

    cudaSetDevice(w->device);

    uint32_t* x_map = w->cuda_x_map;
    const half* x_mapped = x;
    if (x_map && !tuningParams->matmul_fused_remap && !alt_stream)
    {
        // Materialize the permuted activations once, then run without x_map.
        CudaBuffers* buffers = get_buffers(w->device);
        column_remap_cuda(x, buffers->temp_state, x_height, dim, w->cuda_x_map);
        x_mapped = buffers->temp_state;
        x_map = NULL;
    }

    // z-slice width tuned per common Llama hidden/intermediate sizes;
    // presumably empirically chosen — values are model-size specific.
    int block_size_z;
    if (w->width == 4096) block_size_z = 384;           // 7B
    else if (w->width == 11008) block_size_z = 256;
    else if (w->width == 5120) block_size_z = 384;      // 13B
    else if (w->width == 13824) block_size_z = 256;
    else if (w->width == 6656) block_size_z = 256;      // 33B
    else if (w->width == 17920) block_size_z = 128;
    else block_size_z = 256;

    //if (!no_zero) cudaMemsetAsync(out, 0, x_height * w->width * sizeof(half));

    dim3 threads(THREADS_X, THREADS_Y, 1);

    dim3 blocks
    (
        (width + threads.x - 1) / threads.x,
        (height + threads.y - 1) / threads.y,
        (dim + block_size_z - 1) / block_size_z
    );

    fp_q4_matmul_kernel kernel = q4_matmul_kernel_pick(tuningParams, block_size_z, w->groupsize, x_map);

    kernel<<<blocks, threads, 0, alt_stream>>> (x_mapped, w->cuda_qweight, out, w->cuda_scales, w->cuda_qzeros, height, dim, width, w->groupsize, block_size_z, x_map, no_zero);
}

// Alternative path: fully dequantize w into a temp buffer, then do an fp16
// GEMM with cuBLAS. no_zero selects beta=1 (accumulate into out) vs beta=0.
void q4_matmul_recons_cuda
(
    ExLlamaTuning* tuningParams,
    const half* x,
    const int x_height,
    Q4Matrix* w,
    half* out,
    bool no_zero,
    const cublasHandle_t handle
)
{
    int height = x_height;
    int dim = w->height;
    int width = w->width;

    cudaSetDevice(w->device);
    CudaBuffers* buffers = get_buffers(w->device);

    const half* x_mapped = x;
    if (w->cuda_x_map)
    {
        column_remap_cuda(x, buffers->temp_state, x_height, dim, w->cuda_x_map);
        x_mapped = buffers->temp_state;
    }

    w->reconstruct(buffers->temp_dq);

    const half alpha = __float2half(1.0f);
    const half beta = no_zero ? __float2half(1.0f) : __float2half(0.0f);
    // Row-major data through column-major cuBLAS: computed as out^T = w^T @ x^T.
    cublasHgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, width, height, dim, &alpha, buffers->temp_dq, width, x_mapped, dim, &beta, out, width);

    // const float alpha = 1.0f;
    // const float beta = no_zero ? 1.0f : 0.0f;
    // cublasSgemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, width, height, dim, &alpha, buffers->temp_dq, CUDA_R_16F, width,
    //               x_mapped, CUDA_R_16F, dim, &beta, out, CUDA_R_16F, width);
}
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cu", "repo_id": "text-generation-inference", "token_count": 4211 }
254
// EXL2 mixed-bit-width quantized GEMM kernel (half activations, 2/3/4/5/6/8-bit
// packed weights). The dot22_* helpers compute 8/16/32-element dot products of
// dequantized weight fragments against activations, scaled by a per-group
// quantization scale, in half2 / float / half accumulation variants.

#include "compat.cuh"

// 8-element dot product, half2 accumulation: g_result + qs_h * (dq . a).
__forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    return __hfma2(result, __halves2half2(qs_h, qs_h), g_result);
}

// 16-element variant of dot22_8.
__forceinline__ __device__ half2 dot22_16(half2(&dq)[8], const half* a_ptr, const half2 g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 8; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    return __hfma2(result, __halves2half2(qs_h, qs_h), g_result);
}

// 32-element variant of dot22_8.
__forceinline__ __device__ half2 dot22_32(half2(&dq)[16], const half* a_ptr, const half2 g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 16; i += 1) result = __hfma2(dq[i], *a2_ptr++, result);
    return __hfma2(result, __halves2half2(qs_h, qs_h), g_result);
}

// Float-accumulating variants: reduce the half2 lanes to float before the
// final scale-and-add for extra headroom.
__forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const half* a_ptr, const float g_result, const float qs_f)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    float result_f = __half2float(__low2half(result)) + __half2float(__high2half(result));
    return fma(result_f, qs_f, g_result);
}

__forceinline__ __device__ float dot22_16_f(half2(&dq)[8], const half* a_ptr, const float g_result, const float qs_f)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 8; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    float result_f = __half2float(__low2half(result)) + __half2float(__high2half(result));
    return fma(result_f, qs_f, g_result);
}

__forceinline__ __device__ float dot22_32_f(half2(&dq)[16], const half* a_ptr, const float g_result, const float qs_f)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 16; i += 1) result = __hfma2(dq[i], *a2_ptr++, result);
    float result_f = __half2float(__low2half(result)) + __half2float(__high2half(result));
    return fma(result_f, qs_f, g_result);
}

// Half-returning variants used by the kernel below.
__forceinline__ __device__ half dot22_8_h(half2(&dq)[4], const half* a_ptr, const half g_result, const half qs_h)
{
    // Use FP32 accumulator to avoid potential overflow since unscaled weights are in the range -128..127
    float result = {};
    #pragma unroll
    for (int i = 0; i < 4; i++)
    {
        half2 w01 = dq[i];
        float w0 = __low2float(w01);
        float w1 = __high2float(w01);
        float x0 = __half2float(*a_ptr++);
        float x1 = __half2float(*a_ptr++);
        result = fma(w0, x0, result);
        result = fma(w1, x1, result);
    }
    float qs = __half2float(qs_h);
    result *= qs;
    half result_h = __float2half_rn(result);
    return __hadd(result_h, g_result);
}

__forceinline__ __device__ half dot22_16_h(half2(&dq)[8], const half* a_ptr, const half g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 8; i++) result = __hfma2(dq[i], *a2_ptr++, result);
    half result_h = __hadd(__low2half(result), __high2half(result))
;
    return __hfma(result_h, qs_h, g_result);
}

__forceinline__ __device__ half dot22_32_h(half2(&dq)[16], const half* a_ptr, const half g_result, const half qs_h)
{
    half2 result = {};
    const half2* a2_ptr = (const half2*)a_ptr;
    #pragma unroll
    for (int i = 0; i < 16; i += 1) result = __hfma2(dq[i], *a2_ptr++, result);
    half result_h = __hadd(__low2half(result), __high2half(result));
    return __hfma(result_h, qs_h, g_result);
}

// Common signature for all gemm_half_q_half_kernel instantiations.
typedef void (*fp_gemm_half_q_half_kernel)
(
    const half*,        // a
    const uint32_t*,    // b_q_weight
    const uint32_t*,    // b_q_scale
    const half*,        // b_q_scale_max
    half*,              // c
    const int,          // size_m
    const int,          // size_n
    const int,          // size_k
    const int,          // groups
    const uint16_t*,    // b_q_group_map
    const uint16_t*,    // b_q_perm
    const int,          // rows_8
    const int,          // rows_6
    const int,          // rows_5
    const int,          // rows_4
    const int,          // rows_3
    const int,          // rows_2
    const bool,         // clear
    const half*,        // r_weights
    const int           // r_weights_stride
)
;

// c = a @ dequant(b). Rows of b are stored bit-width-major: the first rows_8
// rows are 8-bit, then 6-, 5-, 4-, 3-, 2-bit (each boundary a multiple of 32).
// Each thread owns 4 output columns; m_count output rows are processed per
// block; blockIdx.z slices size_k, with partial sums combined via atomicAdd.
// use_r_weights / mul_r_weights optionally gate and scale rows by per-row
// weights (used for LoRA-style row weighting — assumption, TODO confirm).
template <int m_count, bool use_r_weights, bool mul_r_weights>
__global__ void gemm_half_q_half_kernel
(
    const half* __restrict__ a,
    const uint32_t* __restrict__ b_q_weight,
    const uint32_t* __restrict__ b_q_scale,
    const half* __restrict__ b_q_scale_max,
    half* __restrict__ c,
    const int size_m,
    const int size_n,
    const int size_k,
    const int groups,
    const uint16_t* __restrict__ b_q_group_map,
    const uint16_t* __restrict__ b_q_perm,
    const int rows_8,
    const int rows_6,
    const int rows_5,
    const int rows_4,
    const int rows_3,
    const int rows_2,
    const bool clear,
    const half* r_weights,
    const int r_weights_stride
)
{
    MatrixView_half a_(a, size_m, size_k);
    MatrixView_half_rw c_(c, size_m, size_n);
    MatrixView_q4_row b_q_scale_(b_q_scale, groups, size_n);

    int t = threadIdx.x;

    // Block
    int offset_n = blockIdx.x * EXL2_BLOCK_KN_SIZE * 4;
    int offset_m = blockIdx.y * m_count;
    int offset_k = blockIdx.z * EXL2_BLOCK_KN_SIZE;

    int end_n = min(offset_n + EXL2_BLOCK_KN_SIZE * 4, size_n);
    int end_m = min(offset_m + m_count, size_m);
    int end_k = min(offset_k + EXL2_BLOCK_KN_SIZE, size_k);

    int n = offset_n + t * 4;

    // Read weights
    half_uint16 weights[MAX_Q_GEMM_WEIGHTS];
    if constexpr (use_r_weights)
    {
        uint16_t any_w = 0;
        const half* w_ptr = r_weights;
        for (int m = 0; m < m_count; ++m)
        {
            weights[m].as_half = *w_ptr;
            w_ptr += r_weights_stride;
            any_w |= weights[m].as_uint16;
        }
        if (!any_w) return;  // Early exit if all weights are zero -- does not zero output (!!!)
    }

    // Preload block_a
    // Activations are gathered through b_q_perm so they match b's row order.
    __shared__ half block_a[m_count][EXL2_BLOCK_KN_SIZE];

    if (offset_k + t < end_k)
    {
        for (int m = 0; m < m_count; ++m)
        {
            const half* a_ptr = a_.item_ptr(offset_m + m, 0);
            half* block_a_ptr = block_a[m];

            half a0 = a_ptr[b_q_perm[offset_k + t]];
//          half a0 = a_ptr[offset_k + t];
            block_a_ptr[t] = a0;
        }
    }

    // Clear
    if (n >= size_n) return;

    if (clear && blockIdx.z == 0) // && (threadIdx.x & 1) == 0)
    {
        // 64-bit store zeroes this thread's 4 half output columns at once.
        for (int m = 0; m < m_count; m++)
            *((uint64_t*) c_.item_ptr(offset_m + m, n)) = 0;
    }

    __syncthreads();

    // Find initial group
    //int group = offset_k / groupsize;
    int group = b_q_group_map[offset_k * 2];

//     if (offset_m == 0 && t == 0)
//         DBGI2(offset_k, group);

    // Preload scales
    // Stored scale q is decoded as (q+1)^2 * maxscale for each of the 4 columns.
    half scales[EXL2_MAX_GROUPS_IN_BLOCK][4];

    //int groups_in_block = DIVIDE((end_k - offset_k), groupsize);
    int temp_k = offset_k;
    for (int g = 0; temp_k < end_k; g++)
    {
        int qscales[4];
        b_q_scale_.item4(qscales, group + g, n);
        qscales[0]++;
        qscales[1]++;
        qscales[2]++;
        qscales[3]++;
        half maxscale = b_q_scale_max[group + g];
        scales[g][0] = __hmul(__int2half_rn(qscales[0] * qscales[0]), maxscale);
        scales[g][1] = __hmul(__int2half_rn(qscales[1] * qscales[1]), maxscale);
        scales[g][2] = __hmul(__int2half_rn(qscales[2] * qscales[2]), maxscale);
        scales[g][3] = __hmul(__int2half_rn(qscales[3] * qscales[3]), maxscale);
        temp_k += b_q_group_map[temp_k * 2 + 1];
    }

    // a, b offset
    // qk = number of packed 32-bit b rows preceding offset_k, accounting for
    // the per-bit-width packing density (32 source rows -> bits packed rows).
    int pre_rows_8 = min(rows_8, offset_k);
    int pre_rows_6 = offset_k > rows_8 ? min(rows_6, offset_k) - rows_8 : 0;
    int pre_rows_5 = offset_k > rows_6 ? min(rows_5, offset_k) - rows_6 : 0;
    int pre_rows_4 = offset_k > rows_5 ? min(rows_4, offset_k) - rows_5 : 0;
    int pre_rows_3 = offset_k > rows_4 ? min(rows_3, offset_k) - rows_4 : 0;
    int pre_rows_2 = offset_k > rows_3 ? min(rows_2, offset_k) - rows_3 : 0;
    int qk = 0;
    qk += pre_rows_8 / 32 * 8;
    qk += pre_rows_6 / 32 * 6;
    qk += pre_rows_5 / 32 * 5;
    qk += pre_rows_4 / 32 * 4;
    qk += pre_rows_3 / 32 * 3;
    qk += pre_rows_2 / 32 * 2;

    const uint32_t* b_ptr = b_q_weight + qk * size_n + n;
    const half* a_ptr = &block_a[0][0];
    int a_stride = EXL2_BLOCK_KN_SIZE;

    // Initial group
    int scales_idx = 0;
    half qs_h0 = scales[scales_idx][0];
    half qs_h1 = scales[scales_idx][1];
    half qs_h2 = scales[scales_idx][2];
    half qs_h3 = scales[scales_idx][3];
    int nextgroup = offset_k + b_q_group_map[offset_k * 2 + 1];

    // Column result
    half block_c[m_count][4] = {};

    // Dequantize groups
    int k = offset_k;

    // 8-bit rows: 32 source rows come from 8 packed rows (2 int4 loads/4 cols).
    while (k < rows_8 && k < end_k)
    {
        if (k == nextgroup)
        {
            group++; scales_idx++;
            qs_h0 = scales[scales_idx][0];
            qs_h1 = scales[scales_idx][1];
            qs_h2 = scales[scales_idx][2];
            qs_h3 = scales[scales_idx][3];
            nextgroup += b_q_group_map[k * 2 + 1];
        }

        #pragma unroll
        for (int j = 0; j < 4; j++)
        {
            int4 load_int4[2];
            load_int4[0] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[1] = *((int4*) b_ptr); b_ptr += size_n;

            half2 dq[4][4];
            dequant_8bit_8(load_int4[0].x, load_int4[1].x, dq[0], size_n);
            dequant_8bit_8(load_int4[0].y, load_int4[1].y, dq[1], size_n);
            dequant_8bit_8(load_int4[0].z, load_int4[1].z, dq[2], size_n);
            dequant_8bit_8(load_int4[0].w, load_int4[1].w, dq[3], size_n);

            for (int m = 0; m < m_count; m++)
            {
                if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; }
                block_c[m][0] = dot22_8_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0);
                block_c[m][1] = dot22_8_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1);
                block_c[m][2] = dot22_8_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2);
                block_c[m][3] = dot22_8_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3);
            }
            a_ptr += 8;
        }
        k += 32;
    }

    // 6-bit rows: 32 source rows from 6 packed rows (2 x 3 int4 loads).
    while (k < rows_6 && k < end_k)
    {
        if (k == nextgroup)
        {
            group++; scales_idx++;
            qs_h0 = scales[scales_idx][0];
            qs_h1 = scales[scales_idx][1];
            qs_h2 = scales[scales_idx][2];
            qs_h3 = scales[scales_idx][3];
            nextgroup += b_q_group_map[k * 2 + 1];
        }

        #pragma unroll
        for (int j = 0; j < 2; j++)
        {
            int4 load_int4[3];
            load_int4[0] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[1] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[2] = *((int4*) b_ptr); b_ptr += size_n;

            half2 dq[4][8];
            dequant_6bit_16(load_int4[0].x, load_int4[1].x, load_int4[2].x, dq[0], size_n);
            dequant_6bit_16(load_int4[0].y, load_int4[1].y, load_int4[2].y, dq[1], size_n);
            dequant_6bit_16(load_int4[0].z, load_int4[1].z, load_int4[2].z, dq[2], size_n);
            dequant_6bit_16(load_int4[0].w, load_int4[1].w, load_int4[2].w, dq[3], size_n);

            for (int m = 0; m < m_count; m++)
            {
                if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; }
                block_c[m][0] = dot22_16_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0);
                block_c[m][1] = dot22_16_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1);
                block_c[m][2] = dot22_16_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2);
                block_c[m][3] = dot22_16_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3);
            }
            a_ptr += 16;
        }
        k += 32;
    }

    // 5-bit rows: 32 source rows from 5 packed rows.
    while (k < rows_5 && k < end_k)
    {
        if (k == nextgroup)
        {
            group++; scales_idx++;
            qs_h0 = scales[scales_idx][0];
            qs_h1 = scales[scales_idx][1];
            qs_h2 = scales[scales_idx][2];
            qs_h3 = scales[scales_idx][3];
            nextgroup += b_q_group_map[k * 2 + 1];
        }

        #pragma unroll
        for (int j = 0; j < 1; j++)
        {
            int4 load_int4[5];
            load_int4[0] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[1] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[2] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[3] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[4] = *((int4*) b_ptr); b_ptr += size_n;

            half2 dq[4][16];
            dequant_5bit_32(load_int4[0].x, load_int4[1].x, load_int4[2].x, load_int4[3].x, load_int4[4].x, dq[0], size_n);
            dequant_5bit_32(load_int4[0].y, load_int4[1].y, load_int4[2].y, load_int4[3].y, load_int4[4].y, dq[1], size_n);
            dequant_5bit_32(load_int4[0].z, load_int4[1].z, load_int4[2].z, load_int4[3].z, load_int4[4].z, dq[2], size_n);
            dequant_5bit_32(load_int4[0].w, load_int4[1].w, load_int4[2].w, load_int4[3].w, load_int4[4].w, dq[3], size_n);

            for (int m = 0; m < m_count; m++)
            {
                if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; }
                block_c[m][0] = dot22_32_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0);
                block_c[m][1] = dot22_32_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1);
                block_c[m][2] = dot22_32_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2);
                block_c[m][3] = dot22_32_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3);
            }
            a_ptr += 32;
        }
        k += 32;
    }

    // 4-bit rows: 32 source rows from 4 packed rows (1 int4 load per 8 rows).
    while (k < rows_4 && k < end_k)
    {
        if (k == nextgroup)
        {
            group++; scales_idx++;
            qs_h0 = scales[scales_idx][0];
            qs_h1 = scales[scales_idx][1];
            qs_h2 = scales[scales_idx][2];
            qs_h3 = scales[scales_idx][3];
            nextgroup += b_q_group_map[k * 2 + 1];
        }

        #pragma unroll
        for (int j = 0; j < 4; j++)
        {
            int4 load_int4[1];
            load_int4[0] = *((int4*) b_ptr); b_ptr += size_n;

            half2 dq[4][4];
            dequant_4bit_8(load_int4[0].x, dq[0], size_n);
            dequant_4bit_8(load_int4[0].y, dq[1], size_n);
            dequant_4bit_8(load_int4[0].z, dq[2], size_n);
            dequant_4bit_8(load_int4[0].w, dq[3], size_n);

            for (int m = 0; m < m_count; m++)
            {
                if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; }
                block_c[m][0] = dot22_8_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0);
                block_c[m][1] = dot22_8_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1);
                block_c[m][2] = dot22_8_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2);
                block_c[m][3] = dot22_8_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3);
            }
            a_ptr += 8;
        }
        k += 32;
    }

    // 3-bit rows: 32 source rows from 3 packed rows.
    while (k < rows_3 && k < end_k)
    {
        if (k == nextgroup)
        {
            group++; scales_idx++;
            qs_h0 = scales[scales_idx][0];
            qs_h1 = scales[scales_idx][1];
            qs_h2 = scales[scales_idx][2];
            qs_h3 = scales[scales_idx][3];
            nextgroup += b_q_group_map[k * 2 + 1];
        }

        #pragma unroll
        for (int j = 0; j < 1; j++)
        {
            int4 load_int4[3];
            load_int4[0] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[1] = *((int4*) b_ptr); b_ptr += size_n;
            load_int4[2] = *((int4*) b_ptr); b_ptr += size_n;

            half2 dq[4][16];
            dequant_3bit_32(load_int4[0].x, load_int4[1].x, load_int4[2].x, dq[0], size_n);
            dequant_3bit_32(load_int4[0].y, load_int4[1].y, load_int4[2].y, dq[1], size_n);
            dequant_3bit_32(load_int4[0].z, load_int4[1].z, load_int4[2].z, dq[2], size_n);
            dequant_3bit_32(load_int4[0].w, load_int4[1].w, load_int4[2].w, dq[3], size_n);

            for (int m = 0; m < m_count; m++)
            {
                if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; }
                block_c[m][0] = dot22_32_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0);
                block_c[m][1] = dot22_32_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1);
                block_c[m][2] = dot22_32_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2);
                block_c[m][3] = dot22_32_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3);
            }
            a_ptr += 32;
        }
        k += 32;
    }

    // 2-bit rows: note the smaller step — 16 source rows per iteration.
    while (k < rows_2 && k < end_k)
    {
        if (k == nextgroup)
        {
            group++; scales_idx++;
            qs_h0 = scales[scales_idx][0];
            qs_h1 = scales[scales_idx][1];
            qs_h2 = scales[scales_idx][2];
            qs_h3 = scales[scales_idx][3];
            nextgroup += b_q_group_map[k * 2 + 1];
        }

        #pragma unroll
        for (int j = 0; j < 1; j++)
        {
            int4 load_int4[1];
            load_int4[0] = *((int4*) b_ptr); b_ptr += size_n;

            half2 dq[4][8];
            dequant_2bit_16(load_int4[0].x, dq[0], size_n);
            dequant_2bit_16(load_int4[0].y, dq[1], size_n);
            dequant_2bit_16(load_int4[0].z, dq[2], size_n);
            dequant_2bit_16(load_int4[0].w, dq[3], size_n);

            for (int m = 0; m < m_count; m++)
            {
                if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; }
                block_c[m][0] = dot22_16_h(dq[0], a_ptr + m * a_stride, block_c[m][0], qs_h0);
                block_c[m][1] = dot22_16_h(dq[1], a_ptr + m * a_stride, block_c[m][1], qs_h1);
                block_c[m][2] = dot22_16_h(dq[2], a_ptr + m * a_stride, block_c[m][2], qs_h2);
                block_c[m][3] = dot22_16_h(dq[3], a_ptr + m * a_stride, block_c[m][3], qs_h3);
            }
            a_ptr += 16;
        }
        k += 16;
    }

    // Accumulate column sums in c
    // atomicAdd because multiple k-slices (blockIdx.z) target the same cells.
    for (int m = 0; m < m_count; m++)
    {
        half2* out = (half2*)c_.item_ptr(offset_m + m, n);
        half2 result01 = __halves2half2(block_c[m][0], block_c[m][1]);
        half2 result23 = __halves2half2(block_c[m][2], block_c[m][3]);

        if constexpr (mul_r_weights)
        {
            half2 w_mul2 = __half2half2(weights[m].as_half);
            result01 = __hmul2(result01, w_mul2);
            result23 = __hmul2(result23, w_mul2);
        }

        atomicAdd(out    , result01);
        atomicAdd(out + 1, result23);
//      *out = result01;
//      *(out + 1) = result23;
    }
}

// Compile-time dispatch table mapping runtime m_count (1..EXL2_BLOCK_M_SIZE_MAX)
// to the matching kernel instantiation.
template <bool use_r_weights, bool mul_r_weights>
struct map_m_count_exl2 {
    static constexpr fp_gemm_half_q_half_kernel pick_gemm_half_q_half_kernel(const int m_count)
    {
        #if EXL2_BLOCK_M_SIZE_MAX >= 1
        if (m_count == 1) return gemm_half_q_half_kernel<1, use_r_weights, mul_r_weights>;
        #endif
        #if EXL2_BLOCK_M_SIZE_MAX >= 2
        if (m_count == 2) return gemm_half_q_half_kernel<2, use_r_weights, mul_r_weights>;
        #endif
        #if EXL2_BLOCK_M_SIZE_MAX >= 3
        if (m_count == 3) return gemm_half_q_half_kernel<3, use_r_weights, mul_r_weights>;
        #endif
        #if EXL2_BLOCK_M_SIZE_MAX >= 4
        if (m_count == 4) return gemm_half_q_half_kernel<4, use_r_weights, mul_r_weights>;
        #endif
        #if EXL2_BLOCK_M_SIZE_MAX >= 5
        if (m_count == 5) return gemm_half_q_half_kernel<5, use_r_weights, mul_r_weights>;
        #endif
        #if EXL2_BLOCK_M_SIZE_MAX >= 6
        if (m_count == 6) return gemm_half_q_half_kernel<6, use_r_weights, mul_r_weights>;
        #endif
        #if EXL2_BLOCK_M_SIZE_MAX >= 7
        if (m_count == 7) return gemm_half_q_half_kernel<7, use_r_weights, mul_r_weights>;
        #endif
        #if EXL2_BLOCK_M_SIZE_MAX >= 8
        if (m_count == 8) return gemm_half_q_half_kernel<8, use_r_weights, mul_r_weights>;
        #endif
        return NULL;
    }
};

// Runtime selector over the (r_weights, mul_r_weights) template axes.
// Returns NULL when m_count exceeds EXL2_BLOCK_M_SIZE_MAX.
fp_gemm_half_q_half_kernel pick_gemm_half_q_half_kernel(const int m_count, bool r_weights, bool mul_r_weights)
{
    if (!r_weights && !mul_r_weights) return map_m_count_exl2<false, false>::pick_gemm_half_q_half_kernel(m_count);
    if (!r_weights &&  mul_r_weights) return map_m_count_exl2<false,  true>::pick_gemm_half_q_half_kernel(m_count);
    if ( r_weights && !mul_r_weights) return map_m_count_exl2< true, false>::pick_gemm_half_q_half_kernel(m_count);
    if ( r_weights &&  mul_r_weights) return map_m_count_exl2< true,  true>::pick_gemm_half_q_half_kernel(m_count);
    return NULL;
}
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel.cuh", "repo_id": "text-generation-inference", "token_count": 11459 }
255
import torch from typing import List AWQ_PACK_ORDER = [0, 2, 4, 6, 1, 3, 5, 7] REVERSE_AWQ_PACK_ORDER = [0, 4, 1, 5, 2, 6, 3, 7] def pack(imatrix: torch.Tensor, direction: str = "column"): """ Packs a 4-bit integer matrix into a packed 32-bit integer matrix. Args: imatrix (torch.Tensor): matrix of integers direction (str): direction of packing, either "column" or "row" Returns: qmatrix (torch.Tensor): packed matrix of integers """ shifts = torch.arange(0, 32, 4, dtype=torch.int32, device=imatrix.device) imatrix = imatrix.to(torch.int8) & 0x0F # eventually correct overflow if direction == "column": imatrix = imatrix.view(-1, imatrix.shape[1] // (32 // 4), (32 // 4)) qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, None, :]).sum(dim=-1) elif direction == "row": imatrix = imatrix.view(imatrix.shape[0] // (32 // 4), (32 // 4), -1) qmatrix = torch.bitwise_left_shift(imatrix, shifts[None, :, None]).sum(dim=1) qmatrix = qmatrix.to(torch.int32) return qmatrix def unpack(qmatrix: torch.Tensor, direction: str = "column"): """ Unpacks a 32-bit packed integer matrix into a 4-bit integer matrix. Args: qmatrix (torch.Tensor): matrix of packed integers direction (str): direction of unpacking, either "column" or "row" Returns: imatrix (torch.Tensor): matrix of integers """ shifts = torch.arange(0, 32, 4, device=qmatrix.device) if direction == "column": imatrix = torch.bitwise_right_shift( qmatrix[:, :, None], shifts[None, None, :] ).view(qmatrix.shape[0], -1) elif direction == "row": imatrix = torch.bitwise_right_shift( qmatrix[:, None, :], shifts[None, :, None] ).view(-1, qmatrix.shape[-1]) imatrix = imatrix.to(torch.int8) & 0x0F # eventually correct overflow return imatrix def apply_order( imatrix: torch.Tensor, direction: str = "column", order: List[int] = AWQ_PACK_ORDER, ): """ Applies the order to a 4-bit integer matrix. 
Args: imatrix (torch.Tensor): matrix of integers direction (str): direction of applying order, either "column" or "row" order (List[int]): order to apply, default is AWQ_PACK_ORDER Returns: imatrix (torch.Tensor): matrix of integers """ if direction == "column": imatrix = imatrix.view(-1, (32 // 4))[:, order].view(imatrix.shape) elif direction == "row": imatrix = imatrix.view((32 // 4), -1)[order, :].view(imatrix.shape) return imatrix def fast_awq_to_gptq(qweight, qzeros): # awq uses column packing for both weights and zeros izeros = unpack(qzeros, direction="column") iweights = unpack(qweight, direction="column") # Reverse the order of the iweight and izeros tensors izeros = apply_order(izeros, direction="column", order=REVERSE_AWQ_PACK_ORDER) iweights = apply_order(iweights, direction="column", order=REVERSE_AWQ_PACK_ORDER) # Subtract 1 from the izeros tensor (gptq adds 1 to the zeros) izeros = izeros - 1 # exllama uses row packing for weights and column packing for zeros qzeros = pack(izeros, direction="column") qweight = pack(iweights, direction="row") return qweight, qzeros
text-generation-inference/server/text_generation_server/layers/awq/conversion_utils.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/awq/conversion_utils.py", "repo_id": "text-generation-inference", "token_count": 1384 }
256
from typing import TYPE_CHECKING, Optional, List import torch import torch.distributed from torch import nn from torch.distributed import ProcessGroup from text_generation_server.utils.sgmv import ( add_lora_a_bgmv, add_lora_b_bgmv, has_sgmv, lora_a_sgmv_cutlass, lora_b_sgmv_cutlass, orient_for_rank, ) if TYPE_CHECKING: from text_generation_server.adapters import AdapterBatchData from text_generation_server.adapters.lora import BatchLoraWeights class LoraLinear(nn.Module): def __init__( self, base_layer: nn.Module, layer_id: int, process_group: ProcessGroup ): super().__init__() self.base_layer = base_layer self.layer_id = layer_id self.process_group = process_group def forward_layer_type( self, result: torch.Tensor, input: torch.Tensor, adapter_data: "AdapterBatchData", layer_type: str, start_idx: int, end_idx: int, ) -> torch.Tensor: if adapter_data is None: return result data: Optional["BatchLoraWeights"] = adapter_data.data.get(layer_type) if has_sgmv() and data is not None and data.can_vectorize(self.process_group): # In tensor-parallel configurations, each GPU processes a specific segment of the output. # The 'result' tensor represents the full output, which can vary in size based on # the layer type (e.g., attention vs. feed-forward layers). We define the current # segment using start_idx and end_idx. If the segment size doesn't match this GPU's # slice of 'result', we create a zero tensor of the correct size for LoRA computation. # This approach ensures accurate LoRA application across various layer sizes and # configurations, adapting to different model architectures and parallelization strategies. # # Example scenarios where this is necessary: # 1. The adapter's size doesn't evenly divide across GPUs. # 2. We're processing the last segment which might be smaller. # 3. Different projection layers (q, k, v) have different sizes. 
if end_idx - start_idx != result.shape[1]: proj = torch.zeros_like(result[:, start_idx:end_idx]) else: proj = result for r, rank_segments in data.rank_data.items(): lora_a_ptr = rank_segments.lora_a_ptr lora_b_ptr = rank_segments.lora_b_ptr if lora_a_ptr is None or lora_b_ptr is None: raise ValueError("LoRA data is missing") if data.use_sgmv: # Use SGMV for prefill v = lora_a_sgmv_cutlass( input, rank_segments.tmp_shrink, lora_a_ptr, rank_segments.segment_starts, rank_segments.segment_ends, self.layer_id, r, ) if self.process_group.size() > 1: v = self.collect_lora_a(v) lora_b_sgmv_cutlass( proj, v, rank_segments.tmp_expand, lora_b_ptr, rank_segments.segment_starts, rank_segments.segment_ends, self.layer_id, ) else: # Use BGMV for decode v = torch.zeros( (input.size(0), r), dtype=input.dtype, device=input.device ) # TODO: error with [-1, 0], but not [0, -1] add_lora_a_bgmv( v, input, lora_a_ptr, rank_segments.indices, self.layer_id, ) if self.process_group.size() > 1: v = self.collect_lora_a(v) add_lora_b_bgmv( proj, v, lora_b_ptr, rank_segments.indices, self.layer_id, ) if end_idx - start_idx != result.shape[1]: result[:, start_idx:end_idx] += proj else: for adapter_index in adapter_data.meta.adapter_set: if data is not None and data.has_adapter(adapter_index): adapter_mask = ( (adapter_data.meta.adapter_indices == adapter_index) .to(input.dtype) .view(-1, 1) ) layer_result = self.forward_lora( input, data, adapter_index, adapter_mask ) result[:, start_idx:end_idx] += layer_result return result def forward_lora( self, input: torch.Tensor, data: "BatchLoraWeights", adapter_index: int, adapter_mask: torch.Tensor, ) -> torch.Tensor: lora_a = data.lora_a[adapter_index][self.layer_id, :, :] lora_b = data.lora_b[adapter_index][self.layer_id, :, :] lora_a = orient_for_rank(lora_a, lora_b.size(0)) a_out = input @ lora_a if self.process_group.size() > 1: a_out = self.collect_lora_a(a_out) result = (a_out @ lora_b) * adapter_mask return result def collect_lora_a(self, 
a_out: torch.Tensor) -> torch.Tensor: raise NotImplementedError("Implemented in subclasses") class TensorParallelMultiAdapterLinear(LoraLinear): def __init__( self, base_layer: nn.Module, layer_id: int, layer_names: List[str], sizes: List[int], process_group: ProcessGroup, ): super().__init__(base_layer, layer_id, process_group) self.layer_names = layer_names self.sizes = sizes @classmethod def load( cls, base_layer: nn.Module, layer_id: int, layer_names: List[str], sizes: List[int], process_group: ProcessGroup, ): return TensorParallelMultiAdapterLinear( base_layer, layer_id, layer_names, sizes, process_group ) def forward( self, input: torch.Tensor, adapter_data: "AdapterBatchData" ) -> torch.Tensor: result = self.base_layer(input) # noop if no layer names are provided (e.g. for models without adapters) if self.layer_names is None: return result # handle models like Bloom that have inputs of shape # (batch_size, sequence_length, hidden_size) # we need to reshape them to (batch_size * sequence_length, hidden_size) # for the LoRA computation, then reshape back prev_shape = result.shape is_3d = len(input.shape) >= 3 if is_3d: input = input.reshape(-1, input.shape[-1]) result = result.reshape(-1, result.shape[-1]) offset = 0 for i, layer_name in enumerate(self.layer_names): start_idx = offset // self.process_group.size() # The 'sizes' parameter is essential in tensor-parallel setups for handling multiple # projection layers (q_proj, k_proj, v_proj) by defining their output dimensions. It # ensures correct slicing of the result tensor, accommodating variations like grouped-query # attention where k_proj and v_proj differ from q_proj. This allows precise application of # LoRA adapters to each sub-component of the multi-head attention mechanism, managing the # different projection sizes across layers and model architectures. 
if self.sizes is not None: offset += self.sizes[i] end_idx = offset // self.process_group.size() else: end_idx = result.shape[1] result = self.forward_layer_type( result, input, adapter_data, layer_name, start_idx, end_idx ) if is_3d: result = result.reshape(prev_shape) return result def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor: # Tensor parallel implementation of X @ A@B, where A and B are sharded column-wise. # We use an all-gather between X@A and (X@A)@B to ensure alignment across ranks. # # TODO(travis): this is not very efficient as we do an all-gather for every adapter, # instead we could pre-allocate a (B, a, r) tensor for all adapters with the same # rank, compute `a_out` on each, and then slice them into the buffer as shown here: # https://discuss.pytorch.org/t/concatenate-tensors-without-memory-copying/34609 gathered_tensors = [ torch.empty_like(a_out) for _ in range(self.process_group.size()) ] torch.distributed.all_gather(gathered_tensors, a_out) return torch.cat(gathered_tensors, dim=1) class TensorParallelAdapterRowLinear(LoraLinear): def __init__(self, base_layer, layer_id, layer_name, process_group): super().__init__(base_layer, layer_id, process_group) self.layer_name = layer_name @classmethod def load(cls, base_layer, layer_id, layer_name, process_group): return cls(base_layer, layer_id, layer_name, process_group) def forward( self, input: torch.Tensor, adapter_data: "AdapterBatchData" ) -> torch.Tensor: result = self.base_layer(input) if self.layer_name is None: return result # Fused all-gather + all-reduce from S-LoRA paper: https://arxiv.org/abs/2311.03285 stride = result.shape[-1] // self.process_group.size() start_idx = self.process_group.rank() * stride end_idx = (self.process_group.rank() + 1) * stride self.forward_layer_type( result, input, adapter_data, self.layer_name, start_idx, end_idx ) return result def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor: # Tensor parallel implementation of X @ A@B, where A and 
B are sharded row-wise. # We use an all-reduce between X@A and (X@A)@B to ensure alignment across ranks. # # TODO(travis): this is not very efficient as we do an all-reduce for every adapter, # instead we could pre-allocate a (B, a, r) tensor for all adapters with the same # rank, compute `a_out` on each, and then slice them into the buffer as shown here: # https://discuss.pytorch.org/t/concatenate-tensors-without-memory-copying/34609 torch.distributed.all_reduce(a_out, group=self.process_group) return a_out
text-generation-inference/server/text_generation_server/layers/lora.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/lora.py", "repo_id": "text-generation-inference", "token_count": 5398 }
257
from typing import Optional, Tuple import torch from torch import nn from transformers.activations import ACT2FN from transformers.modeling_attn_mask_utils import ( _create_4d_causal_attention_mask, _prepare_4d_attention_mask, ) from transformers.modeling_outputs import ( BaseModelOutputWithPooling, ) from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig from text_generation_server.layers import ( TensorParallelEmbedding, TensorParallelColumnLinear, TensorParallelRowLinear, ) class CLIPVisionEmbeddings(nn.Module): def __init__(self, prefix, config: CLIPVisionConfig, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size # TODO Should we TP this ? self.class_embedding = weights.get_tensor(f"{prefix}.class_embedding") self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.patch_embedding.weight = nn.Parameter( weights.get_tensor(f"{prefix}.patch_embedding.weight"), requires_grad=False ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = TensorParallelEmbedding( prefix=f"{prefix}.position_embedding", weights=weights ) self.register_buffer( "position_ids", torch.arange(self.num_positions, device=weights.device).expand((1, -1)), persistent=False, ) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding( pixel_values.to(dtype=target_dtype) ) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings 
class CLIPTextEmbeddings(nn.Module): def __init__(self, config: CLIPTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding( config.max_position_embeddings, embed_dim ) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False, ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = ( input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] ) if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings class CLIPAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, prefix, config, weights): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_size = self.embed_dim // self.num_heads if self.head_size * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.num_heads = self.num_heads // weights.process_group.size() self.embed_dim = self.embed_dim // weights.process_group.size() self.scale = self.head_size**-0.5 self.dropout = config.attention_dropout self.qkv = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=True, ) self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=True, ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return ( tensor.view(bsz, seq_len, self.num_heads, self.head_size) .transpose(1, 2) .contiguous() ) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, _ = hidden_states.size() # get query proj qkv = self.qkv(hidden_states) query_states, key_states, value_states = qkv.split( [ self.head_size * self.num_heads, ] * 3, dim=2, ) query_states = query_states * self.scale key_states = self._shape(key_states, -1, bsz) value_states = self._shape(value_states, -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_size) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" 
{causal_attention_mask.size()}" ) attn_weights = ( attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = ( attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_probs = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_size): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_size)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_size) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, None class CLIPMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load( prefix=f"{prefix}.fc1", config=config, weights=weights, bias=True ) self.fc2 = TensorParallelRowLinear.load( prefix=f"{prefix}.fc2", config=config, weights=weights, bias=True ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class CLIPEncoderLayer(nn.Module): def __init__(self, prefix, config: CLIPConfig, weights): super().__init__() self.embed_dim = config.hidden_size self.self_attn = 
CLIPAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.layer_norm1 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm1", weights=weights, eps=config.layer_norm_eps ) self.mlp = CLIPMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.layer_norm2 = nn.LayerNorm.load( prefix=f"{prefix}.layer_norm2", weights=weights, eps=config.layer_norm_eps ) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states class CLIPPreTrainedModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = CLIPConfig base_model_prefix = "clip" supports_gradient_checkpointing = True CLIP_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`CLIPConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CLIP_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) """ CLIP_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. """ CLIP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. """ class CLIPEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`CLIPEncoderLayer`]. Args: config: CLIPConfig """ def __init__(self, prefix, config: CLIPConfig, weights): super().__init__() self.config = config self.layers = nn.ModuleList( [ CLIPEncoderLayer( prefix=f"{prefix}.layers.{i}", config=config, weights=weights ) for i in range(config.num_hidden_layers) ] ) def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) """ hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): hidden_states = encoder_layer( hidden_states, attention_mask, causal_attention_mask, ) return hidden_states class CLIPTextTransformer(nn.Module): def __init__(self, prefix: str, config: CLIPTextConfig, weights=None): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPTextEmbeddings(config) # Initialize weights and apply final processing with `self.post_init()` self.encoder = CLIPEncoder( prefix=f"{prefix}.encoder", config=config, weights=weights ) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) # For `pooled_output` computation self.eos_token_id = config.eos_token_id def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, ): r""" Returns: """ if input_ids is None: raise ValueError("You have to specify input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) # CLIP's text model uses causal mask, prepare it here. 
# https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 causal_attention_mask = _create_4d_causal_attention_mask( input_shape, hidden_states.dtype, device=hidden_states.device ) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _prepare_4d_attention_mask( attention_mask, hidden_states.dtype ) encoder_outputs = self.encoder( inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, ) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) if self.eos_token_id == 2: # The `eos_token_id` was incorrect before PR #24773: Let's keep what have been done here. # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added # ------------------------------------------------------------ # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 last_hidden_state[ torch.arange( last_hidden_state.shape[0], device=last_hidden_state.device ), input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax( dim=-1 ), ] else: # The config gets updated `eos_token_id` from PR #24773 (so the use of exta new tokens is possible) last_hidden_state[ torch.arange( last_hidden_state.shape[0], device=last_hidden_state.device ), # We need to get the first position of `eos_token_id` value (`pad_token_ids` might equal to `eos_token_id`) ( input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id ) .int() .argmax(dim=-1), ] return last_hidden_state class CLIPTextModel(CLIPPreTrainedModel): config_class = CLIPTextConfig _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"] def __init__(self, prefix, config: 
CLIPTextConfig): super().__init__(config) self.text_model = CLIPTextTransformer(prefix, config) # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, ): r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, CLIPTextModel >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, ) class CLIPVisionTransformer(nn.Module): def __init__(self, prefix, config: CLIPVisionConfig, weights): super().__init__() self.config = config self.embeddings = CLIPVisionEmbeddings( prefix=f"{prefix}.embeddings", config=config, weights=weights ) self.pre_layrnorm = nn.LayerNorm.load( prefix=f"{prefix}.pre_layrnorm", weights=weights, eps=config.layer_norm_eps ) self.encoder = CLIPEncoder( prefix=f"{prefix}.encoder", config=config, weights=weights ) # self.post_layernorm = nn.LayerNorm.load(prefix=f"{prefix}.post_layernorm", weights=weights, eps=config.layer_norm_eps) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, ): r""" Returns: """ if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, ) last_hidden_state = encoder_outputs # pooled_output = last_hidden_state[:, 0, :] # pooled_output = self.post_layernorm(pooled_output) return BaseModelOutputWithPooling( 
last_hidden_state=last_hidden_state, # pooler_output=pooled_output, # hidden_states=encoder_outputs, ) class CLIPVisionModel(CLIPPreTrainedModel): config_class = CLIPVisionConfig main_input_name = "pixel_values" _no_split_modules = ["CLIPEncoderLayer"] def __init__(self, config: CLIPVisionConfig): super().__init__(config) self.vision_model = CLIPVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding def forward( self, pixel_values: Optional[torch.FloatTensor] = None, ): r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, CLIPVisionModel >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return self.vision_model( pixel_values=pixel_values, ) class CLIPModel(nn.Module): def __init__(self, prefix, config: CLIPConfig, weights): super().__init__() text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = CLIPTextTransformer(text_config) self.vision_model = CLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear( self.vision_embed_dim, self.projection_dim, bias=False ) self.text_projection = nn.Linear( self.text_embed_dim, self.projection_dim, bias=False ) self.logit_scale = nn.Parameter( torch.tensor(self.config.logit_scale_init_value) ) # Initialize weights 
and apply final processing self.post_init() def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, CLIPModel >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`]. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, CLIPModel >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" # Use CLIP model's config for some fields (if specified) instead of those of vision & text components. 
vision_outputs = self.vision_model( pixel_values=pixel_values, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, ): r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, CLIPModel >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use CLIP model's config for some fields (if specified) instead of those of vision & text components. 
vision_outputs = self.vision_model( pixel_values=pixel_values, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() return logits_per_image, logits_per_text
text-generation-inference/server/text_generation_server/models/custom_modeling/clip.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/clip.py", "repo_id": "text-generation-inference", "token_count": 13765 }
258
import torch
import torch.distributed

from torch import nn
from transformers.activations import ACT2FN
from typing import Optional, List, Tuple

from text_generation_server.layers.attention import (
    paged_attention,
    attention,
    reshape_and_cache,
    Seqlen,
)
from text_generation_server.layers import (
    TensorParallelRowLinear,
    TensorParallelColumnLinear,
    SpeculativeHead,
    TensorParallelEmbedding,
    get_linear,
)
from text_generation_server.layers.gptq import GPTQWeightsLoader
from text_generation_server.layers.layernorm import (
    FastLayerNorm,
)


def load_multi_mqa(
    config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
):
    """Load the fused multi-query attention projection, dispatching on quantization.

    GPTQ checkpoints need special handling of packed tensors; marlin is not
    supported for santacoder; everything else goes through the plain loader.
    """
    if config.quantize == "gptq":
        return _load_multi_mqa_gptq(
            config, prefix, weights, bias, head_size, num_heads, hidden_size
        )
    elif config.quantize == "marlin":
        raise RuntimeError(
            "santacoder models with marlin quantization are not yet supported"
        )
    else:
        return _load_multi_mqa(
            config, prefix, weights, bias, head_size, num_heads, hidden_size
        )


def _load_multi_mqa_gptq(
    config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
):
    """Load a GPTQ-quantized fused c_attn as a TensorParallelColumnLinear.

    The query part of the fused weight is sharded across ranks while the
    single shared key/value head (the last ``2 * head_size`` columns) is
    replicated on every rank.
    """
    from text_generation_server.layers.gptq import GPTQWeight

    if any("c_attn" in k for k in weights.routing.keys()) and not config.transpose:
        world_size = weights.process_group.size()
        rank = weights.process_group.rank()

        slice_ = weights._get_slice(f"{prefix}.c_attn.qweight")
        shape = slice_.get_shape()
        block_size = (shape[1] - 2 * head_size) // world_size
        start = rank * block_size
        stop = (rank + 1) * block_size
        assert (shape[1] - 2 * head_size) % world_size == 0
        q_tensor = slice_[:, start:stop]
        kv_tensor = slice_[:, -2 * head_size :]
        qweight = torch.cat([q_tensor, kv_tensor], dim=1)
        qweight = qweight.to(device=weights.device)

        slice_ = weights._get_slice(f"{prefix}.c_attn.scales")
        shape = slice_.get_shape()
        block_size = (shape[1] - 2 * head_size) // world_size
        start = rank * block_size
        stop = (rank + 1) * block_size
        assert (shape[1] - 2 * head_size) % world_size == 0
        q_tensor = slice_[:, start:stop]
        kv_tensor = slice_[:, -2 * head_size :]
        scales = torch.cat([q_tensor, kv_tensor], dim=1)
        scales = scales.to(device=weights.device)

        slice_ = weights._get_slice(f"{prefix}.c_attn.qzeros")
        shape = slice_.get_shape()
        # qzeros are packed 8 values per int32 (4-bit), hence the 4 // 32 factor.
        block_size = (shape[1] - (2 * head_size) * 4 // 32) // world_size
        start = rank * block_size
        stop = (rank + 1) * block_size
        assert 2 * head_size % (32 // 4) == 0
        q_tensor = slice_[:, start:stop]
        kv_tensor = slice_[:, -2 * head_size * 4 // 32 :]
        qzeros = torch.cat([q_tensor, kv_tensor], dim=1)
        qzeros = qzeros.to(device=weights.device)

        loader = weights.weights_loader
        assert isinstance(loader, GPTQWeightsLoader)
        loader._get_gptq_params(weights)
        if loader.quant_method == "gptq":
            g_idx = weights.get_tensor(f"{prefix}.c_attn.g_idx")
            g_idx = g_idx.to(device=weights.device)
        elif loader.quant_method == "awq":
            g_idx = None
            from text_generation_server.layers.awq.conversion_utils import (
                fast_awq_to_gptq,
            )

            qweight, qzeros = fast_awq_to_gptq(qweight, qzeros)

        from text_generation_server.layers.gptq import HAS_EXLLAMA

        weight = GPTQWeight(
            qweight=qweight,
            qzeros=qzeros,
            scales=scales,
            g_idx=g_idx,
            bits=loader.bits,
            groupsize=loader.groupsize,
            use_awq_kernel=loader.quantize == "awq",
            use_exllama=HAS_EXLLAMA,
        )

        if bias:
            slice_ = weights._get_slice(f"{prefix}.c_attn.bias")
            shape = slice_.get_shape()
            block_size = (shape[0] - 2 * head_size) // world_size
            assert (shape[0] - 2 * head_size) % world_size == 0
            # BUG FIX: the original sliced `q_tensor = slice_[start:stop]` here
            # using the stale start/stop from the qzeros block above, then
            # immediately recomputed start/stop and overwrote it. The dead,
            # wrongly-indexed assignment is removed.
            start = rank * block_size
            stop = (rank + 1) * block_size
            q_tensor = slice_[start:stop]
            kv_tensor = slice_[-2 * head_size :]
            bias = torch.cat([q_tensor, kv_tensor], dim=0)
            bias = bias.to(device=weights.device)

        return TensorParallelColumnLinear(get_linear(weight, bias))
    else:
        raise NotImplementedError("Gptq loading with santacoder is not implemented")


def _load_multi_mqa(
    config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
):
    """Load an unquantized fused (or split q/kv) MQA projection.

    Handles both the fused ``c_attn`` layout and the split ``q_attn``/``kv_attn``
    layout, with optional transposition for GPT2-style checkpoints. Query
    columns are sharded; the single kv head is replicated.
    """
    if any("c_attn" in k for k in weights.routing.keys()):
        slice_ = weights._get_slice(f"{prefix}.c_attn.weight")
        shape = slice_.get_shape()
        world_size = weights.process_group.size()
        rank = weights.process_group.rank()
        if config.transpose:
            block_size = (shape[1] - 2 * head_size) // world_size
            start = rank * block_size
            stop = (rank + 1) * block_size
            assert (shape[1] - 2 * head_size) % world_size == 0
            q_tensor = slice_[:, start:stop]
            kv_tensor = slice_[:, -2 * head_size :]
            weight = torch.cat([q_tensor, kv_tensor], dim=1).T
        else:
            block_size = (shape[0] - 2 * head_size) // world_size
            start = rank * block_size
            stop = (rank + 1) * block_size
            assert (shape[0] - 2 * head_size) % world_size == 0
            q_tensor = slice_[start:stop]
            kv_tensor = slice_[-2 * head_size :]
            weight = torch.cat([q_tensor, kv_tensor], dim=0)
        if bias:
            slice_ = weights._get_slice(f"{prefix}.c_attn.bias")
            shape = slice_.get_shape()
            block_size = (shape[0] - 2 * head_size) // world_size
            assert (shape[0] - 2 * head_size) % world_size == 0
            start = rank * block_size
            stop = (rank + 1) * block_size
            q_tensor = slice_[start:stop]
            kv_tensor = slice_[-2 * head_size :]
            bias = torch.cat([q_tensor, kv_tensor], dim=0)
    else:
        if config.transpose:
            w = [
                weights.get_sharded(f"{prefix}.q_attn.weight", dim=1).T,
                weights.get_tensor(f"{prefix}.kv_attn.weight").T,
            ]
            weight = torch.cat(w, dim=0)
        else:
            w = [
                weights.get_sharded(f"{prefix}.q_attn.weight", dim=0),
                weights.get_tensor(f"{prefix}.kv_attn.weight"),
            ]
            weight = torch.cat(w, dim=1)

        if bias:
            b = [
                weights.get_sharded(f"{prefix}.q_attn.bias", dim=0),
                weights.get_tensor(f"{prefix}.kv_attn.bias"),
            ]
            bias = torch.cat(b, dim=0)
        else:
            bias = None

    weight = weight.to(dtype=weights.dtype).to(device=weights.device)
    # num_heads here is the per-shard head count; +2 accounts for the shared k and v.
    assert list(weight.shape) == [
        (num_heads + 2) * head_size,
        hidden_size,
    ], f"{weight.shape} != {[(num_heads + 2) * head_size, hidden_size]}"
    if bias is not None:
        bias = bias.to(dtype=weights.dtype).to(device=weights.device)
        assert list(bias.shape) == [
            (num_heads + 2) * head_size
        ], f"{weight.shape} != {[(num_heads + 2) * head_size]}"
    return TensorParallelColumnLinear(get_linear(weight, bias))


def load_col(config, prefix: str, weights, bias: bool):
    """Load a column-parallel linear, transposing for GPT2-style checkpoints."""
    if config.transpose:
        weight = weights.get_sharded(f"{prefix}.weight", dim=1).T
    else:
        weight = weights.get_multi_weights_col([prefix], dim=0)

    if bias:
        bias = weights.get_sharded(f"{prefix}.bias", dim=0)
    else:
        bias = None
    return TensorParallelColumnLinear(get_linear(weight, bias))


def load_row(config, prefix: str, weights, bias: bool):
    """Load a row-parallel linear; only rank 0 carries the bias to avoid double-adding after all-reduce."""
    if config.transpose:
        weight = weights.get_sharded(f"{prefix}.weight", dim=0).T
    else:
        weight = weights.get_weights_row(prefix)

    if bias and weights.process_group.rank() == 0:
        # Rank is only on the first rank process
        bias = weights.get_tensor(f"{prefix}.bias")
    else:
        bias = None
    return TensorParallelRowLinear(
        get_linear(weight, bias), process_group=weights.process_group
    )


class FlashMQAttention(torch.nn.Module):
    """Multi-query attention: many query heads share a single key/value head."""

    def __init__(self, prefix, config, weights):
        super().__init__()
        num_heads = config.num_attention_heads
        hidden_size = config.hidden_size

        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.head_size = hidden_size // num_heads

        if self.num_heads % weights.process_group.size() != 0:
            raise ValueError(
                f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
                f"and `num_shards`: {weights.process_group.size()}"
            )
        self.num_heads = self.num_heads // weights.process_group.size()

        self.softmax_scale = self.head_size ** (-0.5)

        self.c_attn = load_multi_mqa(
            config,
            prefix=prefix,
            weights=weights,
            bias=True,
            head_size=self.head_size,
            hidden_size=hidden_size,
            num_heads=self.num_heads,
        )
        self.c_proj = load_row(
            config, prefix=f"{prefix}.c_proj", weights=weights, bias=True
        )
        # All query heads map to kv head 0 (multi-query attention).
        self.kv_head_mapping = torch.zeros(
            self.num_heads, dtype=torch.int32, device=weights.device
        )

    def forward(
        self,
        hidden_states,
        cu_seqlen_prefill,
        kv_cache,
        block_tables,
        slots,
        seqlen,
        max_s,
    ):
        qkv = self.c_attn(hidden_states)

        # Split query from key_value
        query, key_value = qkv.split(
            [self.head_size * self.num_heads, 2 * self.head_size], dim=1
        )

        # Prepare query and key_value for indexing
        query = query.view(-1, self.num_heads, self.head_size)
        key_value = key_value.view(-1, 2, 1, self.head_size)

        reshape_and_cache(
            key_value[:, 0], key_value[:, 1], kv_cache[0], kv_cache[1], slots
        )

        # Prefill
        if cu_seqlen_prefill is not None:
            # flash attention
            attn_output = attention(
                query,
                kv_cache[0],
                kv_cache[1],
                seqlen,
                block_tables,
                self.softmax_scale,
            )
        # Decode
        else:
            attn_output = paged_attention(
                query,
                kv_cache[0],
                kv_cache[1],
                self.kv_head_mapping,
                self.softmax_scale,
                block_tables,
                seqlen,
                max_s,
            )

        return self.c_proj(attn_output.view(-1, self.num_heads * self.head_size))


class MLP(nn.Module):
    """Two-layer feed-forward block with the configured activation."""

    def __init__(self, prefix, config, weights):
        super().__init__()
        act = config.activation_function
        self.act = (
            ACT2FN[act]
            if "gelu" not in act
            else lambda x: torch.nn.functional.gelu(
                x,
                approximate=(
                    "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
                ),
            )
        )

        self.c_fc = load_col(
            config, prefix=f"{prefix}.c_fc", weights=weights, bias=True
        )
        self.c_proj = load_row(
            config, prefix=f"{prefix}.c_proj", weights=weights, bias=True
        )

    def forward(self, hidden_states):
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        return hidden_states


class Block(nn.Module):
    """One transformer layer: pre-norm MQA attention followed by pre-norm MLP."""

    def __init__(self, prefix: str, layer_id, config, weights):
        super().__init__()
        prefix = f"{prefix}.h.{layer_id}"
        self.ln_1 = FastLayerNorm.load(
            prefix=f"{prefix}.ln_1", weights=weights, eps=config.layer_norm_epsilon
        )
        self.ln_2 = FastLayerNorm.load(
            prefix=f"{prefix}.ln_2", weights=weights, eps=config.layer_norm_epsilon
        )
        self.self_attn = FlashMQAttention(
            prefix=f"{prefix}.attn",
            config=config,
            weights=weights,
        )
        self.mlp = MLP(
            prefix=f"{prefix}.mlp",
            config=config,
            weights=weights,
        )

    def forward(
        self,
        hidden_states,
        residual,
        cu_seqlen_prefill,
        kv_cache,
        block_tables,
        slots,
        seqlen,
        max_s,
    ):
        # FastLayerNorm fuses the residual add with the normalization.
        hidden_states, residual = self.ln_1(hidden_states, residual)

        hidden_states = self.self_attn(
            hidden_states,
            cu_seqlen_prefill,
            kv_cache,
            block_tables,
            slots,
            seqlen,
            max_s,
        )

        hidden_states, residual = self.ln_2(hidden_states, residual)

        mlp_output = self.mlp(hidden_states)

        return mlp_output, residual


class FlashSantacoderModel(nn.Module):
    """Santacoder transformer trunk: token + position embeddings, layers, final norm."""

    def __init__(self, prefix: str, config, weights):
        super().__init__()
        self.config = config

        self.process_group = weights.process_group
        self.wte = TensorParallelEmbedding(
            prefix=f"{prefix}.wte",
            weights=weights,
            reduce=False,
        )
        self.wpe = TensorParallelEmbedding(
            prefix=f"{prefix}.wpe",
            weights=weights,
            reduce=False,
        )

        self.layers = nn.ModuleList(
            [
                Block(
                    prefix,
                    layer_id,
                    config,
                    weights,
                )
                for layer_id in range(config.num_hidden_layers)
            ]
        )
        # NOTE(review): hard-coded "transformer" prefix, unlike the other loads
        # which use the passed-in prefix — presumably always equal; verify for
        # prefixed checkpoints.
        self.ln_f = FastLayerNorm.load(
            prefix="transformer.ln_f", weights=weights, eps=config.layer_norm_epsilon
        )

        self.head_size = self.layers[0].self_attn.head_size
        self.num_heads = self.layers[0].self_attn.num_heads

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        block_tables: torch.Tensor,
        slots: torch.Tensor,
        seqlen: Seqlen,
        max_s: int,
    ) -> torch.Tensor:
        hidden_states = self.wte(input_ids) + self.wpe(position_ids)

        # Embeddings were loaded with reduce=False, so sum the shards here.
        if self.process_group.size() > 1:
            torch.distributed.all_reduce(hidden_states, group=self.process_group)

        residual = None
        for i, layer in enumerate(self.layers):
            hidden_states, residual = layer(
                hidden_states,
                residual,
                cu_seqlen_prefill,
                kv_cache[i],
                block_tables,
                slots,
                seqlen,
                max_s,
            )

        hidden_states, _ = self.ln_f(hidden_states, residual)

        return hidden_states


class FlashSantacoderForCausalLM(nn.Module):
    """Santacoder with a (weight-tied) language-modeling head."""

    def __init__(self, prefix, config, weights):
        super().__init__()

        if not prefix:
            prefix = "transformer"
        else:
            prefix = f"{prefix}.transformer"

        config.transpose = config.architectures[0].startswith("GPT2")
        self.model = FlashSantacoderModel(prefix, config, weights)
        # lm_head shares weights with the input embedding (wte).
        self.lm_head = SpeculativeHead.load(
            config, prefix=f"{prefix}.wte", weights=weights
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        cu_seqlen_prefill: Optional[torch.Tensor],
        kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
        block_tables: torch.Tensor,
        slots: torch.Tensor,
        seqlen: Seqlen,
        max_s: int,
        prefill_cache_indices: Optional[torch.Tensor],
        lm_head_indices: Optional[torch.Tensor] = None,
        adapter_data: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        hidden_states = self.model(
            input_ids,
            position_ids,
            cu_seqlen_prefill,
            kv_cache,
            block_tables,
            slots,
            seqlen,
            max_s,
        )
        if lm_head_indices is not None:
            # Only project the positions we actually need logits for.
            hidden_states = hidden_states[lm_head_indices]
        logits = self.lm_head(hidden_states)
        return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py", "repo_id": "text-generation-inference", "token_count": 8477 }
259
# coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model."""

import copy
import math
import warnings
from typing import Optional, Tuple, Union

from loguru import logger

import torch
import torch.distributed
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.utils import (
    is_torch_fx_proxy,
)
from transformers import T5Config
from text_generation_server.layers import (
    TensorParallelColumnLinear,
    TensorParallelEmbedding,
    TensorParallelRowLinear,
    SpeculativeHead,
)

# copied from https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/models/t5/modeling_t5.py#L1316
# Warning message for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
__HEAD_MASK_WARNING_MSG = """
The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently,
`decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions.
If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers,
num_heads)`.
"""


class PartialTPEmbedding(nn.Module):
    """Embedding whose weight is sharded along the embedding (column) dimension.

    Each rank holds a slice of the embedding dim; no all-reduce is performed,
    so the caller gets only this rank's portion of each embedding vector.
    """

    def __init__(self, prefix: str, weights):
        super().__init__()
        weight = weights.get_sharded(f"{prefix}.weight", dim=1)
        self.weight = nn.Parameter(weight)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.embedding(input, self.weight)


@torch.jit.script
def layer_norm(hidden_states, weight, epsilon):
    # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
    # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus varience is calculated
    # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
    # half-precision inputs is done in fp32
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + epsilon)

    # convert into half-precision if necessary
    if weight.dtype in [torch.float16, torch.bfloat16]:
        hidden_states = hidden_states.to(weight.dtype)

    return weight * hidden_states


class T5LayerNorm(nn.Module):
    """RMS-style layer norm wrapper around the scripted `layer_norm` kernel."""

    def __init__(self, prefix, weights, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        weight = weights.get_tensor(f"{prefix}.weight")
        self.weight = nn.Parameter(weight)
        # Stored as a tensor so the scripted function can add it directly.
        self.variance_epsilon = torch.tensor(eps)

    def forward(self, hidden_states):
        return layer_norm(hidden_states, self.weight, self.variance_epsilon)


# If apex is installed, replace T5LayerNorm with the fused CUDA implementation.
try:
    from apex.normalization import FusedRMSNorm

    T5LayerNorm = FusedRMSNorm  # noqa

    logger.info(
        "Discovered apex.normalization.FusedRMSNorm - will use it instead of T5LayerNorm"
    )
except ImportError:
    # using the normal T5LayerNorm
    pass
except Exception:
    logger.warning("discovered apex but it failed to load, falling back to T5LayerNorm")
    pass

ALL_LAYERNORM_LAYERS.append(T5LayerNorm)


class T5DenseActDense(nn.Module):
    """T5 feed-forward: wi -> activation -> dropout -> wo (wo kept in fp32)."""

    def __init__(self, config: T5Config, prefix, weights):
        super().__init__()
        self.wi = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.wi", weights=weights, bias=False
        )

        ### XXX: T5 models do not handle well both f16 and quantization.
        ### Overidding specifically this layer for that reason.
        ### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316
        ### https://github.com/huggingface/transformers/issues/20287
        _q = config.quantize
        _dtype = weights.dtype
        weights.dtype = torch.float32
        config.quantize = None
        self.wo_cast = (torch.float32, _dtype)
        self.wo = TensorParallelRowLinear.load(
            config, prefix=f"{prefix}.wo", weights=weights, bias=False
        )
        # Restore the original dtype/quantize settings for subsequent loads.
        weights.dtype = _dtype
        config.quantize = _q

        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = (
            ACT2FN[config.dense_act_fn]
            if "gelu" not in config.dense_act_fn
            else lambda x: torch.nn.functional.gelu(x, approximate="tanh")
        )

    def forward(self, hidden_states):
        hidden_states = self.wi(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # Upcast before the fp32 wo projection.
        hidden_states = hidden_states.to(dtype=self.wo_cast[0])
        hidden_states = self.wo(hidden_states)
        # XXX: Recasting is already done within the layer norm.
        # Casting back to float16 here modifies results
        # hidden_states = hidden_states.to(dtype=self.wo_cast[1])
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    """Gated T5 feed-forward: act(wi_0) * wi_1 -> dropout -> wo (wo kept in fp32)."""

    def __init__(self, config: T5Config, prefix, weights):
        super().__init__()
        self.wi_0 = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.wi_0", weights=weights, bias=False
        )
        self.wi_1 = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.wi_1", weights=weights, bias=False
        )
        ### XXX: T5 models do not handle well both f16 and quantization.
        ### Overidding specifically this layer for that reason.
        ### https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L316
        ### https://github.com/huggingface/transformers/issues/20287
        _q = config.quantize
        _dtype = weights.dtype
        weights.dtype = torch.float32
        config.quantize = None
        self.wo_cast = (torch.float32, _dtype)
        self.wo = TensorParallelRowLinear.load(
            config, prefix=f"{prefix}.wo", weights=weights, bias=False
        )
        # Restore the original dtype/quantize settings for subsequent loads.
        weights.dtype = _dtype
        config.quantize = _q

        self.dropout = nn.Dropout(config.dropout_rate)
        self.act = (
            ACT2FN[config.dense_act_fn]
            if "gelu" not in config.dense_act_fn
            else lambda x: torch.nn.functional.gelu(x, approximate="tanh")
        )

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        # Upcast before the fp32 wo projection.
        hidden_states = hidden_states.to(dtype=self.wo_cast[0])
        hidden_states = self.wo(hidden_states)
        # XXX: Recasting is already done within the layer norm.
        # Casting back to float16 here modifies results
        # hidden_states = hidden_states.to(dtype=self.wo_cast[1])
        return hidden_states


class T5LayerFF(nn.Module):
    """Pre-norm feed-forward layer with residual connection."""

    def __init__(self, config: T5Config, prefix, weights):
        super().__init__()
        if config.is_gated_act:
            self.DenseReluDense = T5DenseGatedActDense(
                config, prefix=f"{prefix}.DenseReluDense", weights=weights
            )
        else:
            self.DenseReluDense = T5DenseActDense(
                config, prefix=f"{prefix}.DenseReluDense", weights=weights
            )

        self.layer_norm = T5LayerNorm(
            prefix=f"{prefix}.layer_norm",
            weights=weights,
            eps=config.layer_norm_epsilon,
        )
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        forwarded_states = self.layer_norm(hidden_states)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5Attention(nn.Module):
    """T5 attention with relative position bias, sharded across tensor-parallel ranks."""

    def __init__(
        self, config: T5Config, prefix, weights, has_relative_attention_bias=False
    ):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.dropout = config.dropout_rate
        self.inner_dim = self.n_heads * self.key_value_proj_dim

        process_group = weights.process_group
        # Mesh TensorFlow initialization to avoid scaling before softmax
        assert self.n_heads % process_group.size() == 0
        self.q = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.q", weights=weights, bias=False
        )
        self.k = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.k", weights=weights, bias=False
        )
        self.v = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.v", weights=weights, bias=False
        )
        self.o = TensorParallelRowLinear.load(
            config, prefix=f"{prefix}.o", weights=weights, bias=False
        )
        if self.n_heads % weights.process_group.size() != 0:
            raise ValueError(
                f"`n_heads` must be divisible by `num_shards` (got `n_heads`: {self.n_heads} "
                f"and `num_shards`: {weights.process_group.size()}"
            )
        # From here on n_heads/inner_dim are per-shard values.
        self.n_heads = self.n_heads // process_group.size()
        self.inner_dim = self.inner_dim // process_group.size()

        if self.has_relative_attention_bias:
            self.relative_attention_bias = PartialTPEmbedding(
                prefix=f"{prefix}.relative_attention_bias", weights=weights
            )

    @staticmethod
    def _relative_position_bucket(
        relative_position, bidirectional=True, num_buckets=32, max_distance=128
    ):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593

        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on

        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer

        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(
                relative_position, torch.zeros_like(relative_position)
            )
        # now relative_position is in the range [0, inf)

        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact

        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large,
            torch.full_like(relative_position_if_large, num_buckets - 1),
        )

        relative_buckets += torch.where(
            is_small, relative_position, relative_position_if_large
        )
        return relative_buckets

    def compute_bias(self, query_length, key_length, device=None):
        """Compute binned relative position bias"""
        if device is None:
            device = self.relative_attention_bias.weight.device
        context_position = torch.arange(query_length, dtype=torch.long, device=device)[
            :, None
        ]
        memory_position = torch.arange(key_length, dtype=torch.long, device=device)[
            None, :
        ]
        relative_position = (
            memory_position - context_position
        )  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,  # shape (query_length, key_length)
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        values = self.relative_attention_bias(
            relative_position_bucket
        )  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(
            0
        )  # shape (1, num_heads, query_length, key_length)
        return values

    def forward(
        self,
        hidden_states,
        mask=None,
        key_value_states=None,
        position_bias=None,
        past_key_value=None,
        layer_head_mask=None,
        query_length=None,
        use_cache=False,
        output_attentions=False,
    ):
        """
        Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
        """
        # Input is (batch_size, seq_length, dim)
        # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
        # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
        batch_size, seq_length = hidden_states.shape[:2]

        real_seq_length = seq_length

        if past_key_value is not None:
            assert (
                len(past_key_value) == 2
            ), f"past_key_value should have 2 past states: keys and values. Got {len(past_key_value)} past states"
            real_seq_length += (
                past_key_value[0].shape[2] if query_length is None else query_length
            )

        key_length = (
            real_seq_length if key_value_states is None else key_value_states.shape[1]
        )

        def shape(states):
            """projection"""
            return states.view(
                batch_size, -1, self.n_heads, self.key_value_proj_dim
            ).transpose(1, 2)

        def unshape(states):
            """reshape"""
            return (
                states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
            )

        def project(hidden_states, proj_layer, key_value_states, past_key_value):
            """projects hidden states correctly to key/query states"""
            if key_value_states is None:
                # self-attn
                # (batch_size, n_heads, seq_length, dim_per_head)
                hidden_states = shape(proj_layer(hidden_states))
            elif past_key_value is None:
                # cross-attn
                # (batch_size, n_heads, seq_length, dim_per_head)
                hidden_states = shape(proj_layer(key_value_states))

            if past_key_value is not None:
                if key_value_states is None:
                    # self-attn
                    # (batch_size, n_heads, key_length, dim_per_head)
                    hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
                elif past_key_value.shape[2] != key_value_states.shape[1]:
                    # checking that the `sequence_length` of the `past_key_value` is the same as
                    # the provided `key_value_states` to support prefix tuning
                    # cross-attn
                    # (batch_size, n_heads, seq_length, dim_per_head)
                    hidden_states = shape(proj_layer(key_value_states))
                else:
                    # cross-attn
                    hidden_states = past_key_value
            return hidden_states

        # get query states
        query_states = shape(
            self.q(hidden_states)
        )  # (batch_size, n_heads, seq_length, dim_per_head)

        # get key/value states
        key_states = project(
            hidden_states,
            self.k,
            key_value_states,
            past_key_value[0] if past_key_value is not None else None,
        )
        value_states = project(
            hidden_states,
            self.v,
            key_value_states,
            past_key_value[1] if past_key_value is not None else None,
        )

        # compute scores
        scores = torch.matmul(
            query_states, key_states.transpose(3, 2)
        )  # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9

        if position_bias is None:
            if not self.has_relative_attention_bias:
                position_bias = torch.zeros(
                    (1, self.n_heads, real_seq_length, key_length),
                    device=scores.device,
                    dtype=scores.dtype,
                )
            else:
                position_bias = self.compute_bias(
                    real_seq_length, key_length, device=scores.device
                )

            # if key and values are already calculated
            # we want only the last query position bias
            if past_key_value is not None:
                position_bias = position_bias[:, :, -hidden_states.size(1) :, :]

            if mask is not None:
                position_bias = (
                    position_bias + mask
                )  # (batch_size, n_heads, seq_length, key_length)

        position_bias_masked = position_bias

        scores += position_bias_masked
        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(
            scores
        )  # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.dropout(
            attn_weights, p=self.dropout, training=self.training
        )  # (batch_size, n_heads, seq_length, key_length)

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = attn_weights * layer_head_mask

        attn_output = unshape(
            torch.matmul(attn_weights, value_states)
        )  # (batch_size, seq_length, dim)
        attn_output = self.o(attn_output)

        present_key_value_state = (
            (key_states, value_states) if (self.is_decoder and use_cache) else None
        )
        outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)

        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs


class T5LayerSelfAttention(nn.Module):
    """Pre-norm self-attention sublayer with residual connection."""

    def __init__(self, config, prefix, weights, has_relative_attention_bias=False):
        super().__init__()
        self.SelfAttention = T5Attention(
            config,
            prefix=f"{prefix}.SelfAttention",
            weights=weights,
            has_relative_attention_bias=has_relative_attention_bias,
        )
        self.layer_norm = T5LayerNorm(
            prefix=f"{prefix}.layer_norm",
            weights=weights,
            eps=config.layer_norm_epsilon,
        )
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        output_attentions=False,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.SelfAttention(
            normed_hidden_states,
            mask=attention_mask,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        hidden_states = hidden_states + self.dropout(attention_output[0])
        outputs = (hidden_states,) + attention_output[
            1:
        ]  # add attentions if we output them
        return outputs


class T5LayerCrossAttention(nn.Module):
    """Pre-norm cross-attention sublayer (decoder attending to encoder output)."""

    def __init__(self, config, prefix, weights):
        super().__init__()
        self.EncDecAttention = T5Attention(
            config,
            prefix=f"{prefix}.EncDecAttention",
            weights=weights,
            has_relative_attention_bias=False,
        )
        self.layer_norm = T5LayerNorm(
            prefix=f"{prefix}.layer_norm",
            weights=weights,
            eps=config.layer_norm_epsilon,
        )
        self.dropout = nn.Dropout(config.dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states,
        attention_mask=None,
        position_bias=None,
        layer_head_mask=None,
        past_key_value=None,
        use_cache=False,
        query_length=None,
        output_attentions=False,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.EncDecAttention(
            normed_hidden_states,
            mask=attention_mask,
            key_value_states=key_value_states,
            position_bias=position_bias,
            layer_head_mask=layer_head_mask,
            past_key_value=past_key_value,
            use_cache=use_cache,
            query_length=query_length,
            output_attentions=output_attentions,
        )
        layer_output = hidden_states + self.dropout(attention_output[0])
        outputs = (layer_output,) + attention_output[
            1:
        ]  # add attentions if we output them
        return outputs


# NOTE(review): T5Block is truncated in this chunk — the definition continues
# beyond the visible source and is reproduced here unchanged up to the cut.
class T5Block(nn.Module):
    def __init__(self, config, prefix, weights, has_relative_attention_bias: bool):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.layer = nn.ModuleList()
        self.layer.append(
            T5LayerSelfAttention(
                config,
                prefix=f"{prefix}.layer.0",
                weights=weights,
                has_relative_attention_bias=has_relative_attention_bias,
            )
        )
        if
self.is_decoder: i = 2 self.layer.append( T5LayerCrossAttention( config, prefix=f"{prefix}.layer.1", weights=weights ) ) else: i = 1 self.layer.append( T5LayerFF(config, prefix=f"{prefix}.layer.{i}", weights=weights) ) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, cross_attn_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: if not self.is_decoder: logger.warning( "`past_key_values` is passed to the encoder. Please make sure this is intended." ) expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 if len(past_key_value) != expected_num_past_key_values: raise ValueError( f"There should be {expected_num_past_key_values} past states. " f"{'2 (past / key) for cross attention. ' if expected_num_past_key_values == 4 else ''}" f"Got {len(past_key_value)} past key / value states" ) self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[ 2: ] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) do_cross_attention = self.is_decoder and encoder_hidden_states 
is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = ( present_key_value_state + cross_attention_outputs[1] ) # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) # clamp inf values to enable fp16 training if hidden_states.dtype == torch.float16: clamp_value = torch.where( torch.isinf(hidden_states).any(), torch.finfo(hidden_states.dtype).max - 1000, torch.finfo(hidden_states.dtype).max, ) hidden_states = torch.clamp( hidden_states, min=-clamp_value, max=clamp_value ) outputs = (hidden_states,) if use_cache: outputs = outputs + (present_key_value_state,) + attention_outputs else: outputs = outputs + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) class 
T5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = T5Config def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert decoder_start_token_id is not None, ( "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id." " See T5 docs for more information" ) # shift inputs to the right if is_torch_fx_proxy(input_ids): # Item assignment is not supported natively for proxies. shifted_input_ids = torch.full( input_ids.shape[:-1] + (1,), decoder_start_token_id ) shifted_input_ids = torch.cat( [shifted_input_ids, input_ids[..., :-1]], dim=-1 ) else: shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert ( pad_token_id is not None ), "self.model.config.pad_token_id has to be defined." 
# replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, prefix, weights, embed_tokens): super().__init__(config) self.is_decoder = config.is_decoder self.embed_tokens = embed_tokens self.block = nn.ModuleList( [ T5Block( config, prefix=f"{prefix}.block.{layer_id}", weights=weights, has_relative_attention_bias=(layer_id == 0), ) for layer_id in range(config.num_layers) ] ) self.final_layer_norm = T5LayerNorm( prefix=f"{prefix}.final_layer_norm", weights=weights, eps=config.layer_norm_epsilon, ) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds" ) if 
inputs_embeds is None: assert ( self.embed_tokens is not None ), "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = ( past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length ) if use_cache is True: assert ( self.is_decoder ), f"`use_cache` can only be set to `True` if {self} is used as a decoder" if attention_mask is None: attention_mask = torch.ones( batch_size, mask_seq_length, device=inputs_embeds.device ) if ( self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None ): encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long, ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=inputs_embeds.device ) encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask( cross_attn_head_mask, self.config.num_layers ) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate( zip(self.block, past_key_values) ): layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] # Model parallel if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # 
hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[ 4 if output_attentions else 3 ] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + ( present_key_value_state, ) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class T5ForConditionalGeneration(T5PreTrainedModel): def __init__(self, config: T5Config, weights): super().__init__(config) self.model_dim = config.d_model self.shared = TensorParallelEmbedding(prefix="shared", weights=weights) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack( 
config=encoder_config, prefix="encoder", weights=weights, embed_tokens=self.shared, ) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack( config=decoder_config, prefix="decoder", weights=weights, embed_tokens=self.shared, ) try: self.lm_head = SpeculativeHead.load( config, prefix="lm_head", weights=weights ) except RuntimeError: # Some models like t5-small were saved with shared weights unlike flan # Since they are declared as the same arch we have no choice but hope # that this is OK instead of using a proper flag. self.lm_head = SpeculativeHead.load( config, prefix="shared", weights=weights ) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: 
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if ( labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None ): # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) logits, speculative_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, 
logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return ( Seq2SeqLMOutput( loss=loss, logits=logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ), speculative_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, head_mask=None, decoder_head_mask=None, decoder_attention_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past_key_values, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "decoder_attention_mask": decoder_attention_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past_key_values, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past_key_values is None: logger.warning( "You might want to consider setting `use_cache=True` to speed up decoding" ) return past_key_values reordered_decoder_past = () for layer_past_states in past_key_values: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position 
reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select( 0, beam_idx.to(layer_past_state.device) ), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + ( reordered_layer_past_states, ) return reordered_decoder_past
text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/t5_modeling.py", "repo_id": "text-generation-inference", "token_count": 22698 }
260
from text_generation_server.utils.convert import convert_file, convert_files from text_generation_server.utils.dist import initialize_torch_distributed from text_generation_server.utils.weights import Weights from text_generation_server.utils.peft import download_and_unload_peft from text_generation_server.utils.hub import ( weight_files, weight_hub_files, download_weights, EntryNotFoundError, LocalEntryNotFoundError, RevisionNotFoundError, ) from text_generation_server.utils.tokens import ( NextTokenChooser, HeterogeneousNextTokenChooser, StoppingCriteria, StopSequenceCriteria, FinishReason, Sampling, Greedy, ) __all__ = [ "convert_file", "convert_files", "initialize_torch_distributed", "weight_files", "weight_hub_files", "download_weights", "download_and_unload_peft", "EntryNotFoundError", "HeterogeneousNextTokenChooser", "LocalEntryNotFoundError", "RevisionNotFoundError", "Greedy", "NextTokenChooser", "Sampling", "StoppingCriteria", "StopSequenceCriteria", "FinishReason", "Weights", ]
text-generation-inference/server/text_generation_server/utils/__init__.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/__init__.py", "repo_id": "text-generation-inference", "token_count": 417 }
261
import re from typing import List, Optional, Tuple, Set, Union import torch from text_generation_server.pb import generate_pb2 from text_generation_server.pb.generate_pb2 import FinishReason, GrammarType from text_generation_server.utils.logits_process import ( FrequencyPenaltyLogitsProcessor, GrammarLogitProcessor, HeterogeneousProcessorWrapper, HeterogeneousRepetitionPenaltyLogitsProcessor, HeterogeneousFrequencyPenaltyLogitsProcessor, HeterogeneousTemperatureLogitsWarper, HeterogeneousTopKLogitsWarper, HeterogeneousTopPLogitsWarper, HeterogeneousTypicalLogitsWarper, HeterogeneousGrammarLogitProcessor, static_warper, ) from text_generation_server.utils.watermark import WatermarkLogitsProcessor from transformers import PreTrainedTokenizerBase, RepetitionPenaltyLogitsProcessor class NextTokenChooser: def __init__( self, watermark: bool = False, temperature: float = 1.0, repetition_penalty: float = 1.0, frequency_penalty: float = 0.0, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, do_sample: bool = False, seed: int = 0, device: str = "cpu", tokenizer: Optional[PreTrainedTokenizerBase] = None, grammar: str = "", grammar_type: GrammarType = GrammarType.GRAMMAR_TYPE_NONE, fsm_grammar_state: int = 0, ): self.watermark_processor = ( WatermarkLogitsProcessor(device=device) if watermark else None ) self.repetition_processor = ( RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty) if repetition_penalty and repetition_penalty != 1.0 else None ) self.frequency_processor = ( FrequencyPenaltyLogitsProcessor(penalty=frequency_penalty) if frequency_penalty and frequency_penalty != 0.0 else None ) self.grammar_processor = ( GrammarLogitProcessor(tokenizer, device, grammar, grammar_type) if grammar != "" else None ) self.tokenizer = tokenizer has_warpers = ( (temperature is not None and temperature != 1.0) or (top_k is not None and top_k != 0) or (top_p is not None and top_p < 1.0) or (typical_p is not None and typical_p < 
1.0) ) if has_warpers: self.static_warper = static_warper( temperature=temperature, top_k=top_k, top_p=top_p, typical_p=typical_p ) else: self.static_warper = None sampling = do_sample or has_warpers self.choice = Sampling(seed, device) if sampling else Greedy() self.fsm_grammar_state = fsm_grammar_state self.grammar = grammar def __call__(self, input_ids, scores): if self.watermark_processor is not None: scores = self.watermark_processor(input_ids, scores) if self.repetition_processor is not None: scores = self.repetition_processor(input_ids, scores) if self.frequency_processor is not None: scores = self.frequency_processor(input_ids, scores) if self.grammar_processor is not None: scores = self.grammar_processor(scores, self.fsm_grammar_state) if self.static_warper is None: next_logprob = torch.log_softmax(scores, -1) else: scores, next_logprob = self.static_warper(scores) next_id = self.choice(scores[-1]).view(1, 1) return next_id, next_logprob def advance_grammar(self, next_id: int): if self.grammar_processor is not None: self.fsm_grammar_state = self.grammar_processor.advance( next_id, self.fsm_grammar_state ) return self @classmethod def from_pb( cls, pb: generate_pb2.NextTokenChooserParameters, device: torch.device, tokenizer: PreTrainedTokenizerBase, ) -> "NextTokenChooser": return NextTokenChooser( watermark=pb.watermark, temperature=pb.temperature, repetition_penalty=pb.repetition_penalty, frequency_penalty=pb.frequency_penalty, top_k=pb.top_k, top_p=pb.top_p, typical_p=pb.typical_p, do_sample=pb.do_sample, seed=pb.seed, device=device, tokenizer=tokenizer, grammar=pb.grammar, grammar_type=pb.grammar_type, ) class StopSequenceCriteria: def __init__(self, stop_sequence: str): stop_sequence = re.escape(stop_sequence) self.regex = re.compile(f"{stop_sequence}$") def __call__(self, output: str) -> bool: if self.regex.findall(output): return True return False class StoppingCriteria: def __init__( self, eos_token_ids: Optional[Union[Set[int], int]], 
stop_sequence_criterias: List[StopSequenceCriteria], max_new_tokens: int = 20, ignore_eos_token: bool = False, ): if eos_token_ids is None: eos_token_ids = set() elif isinstance(eos_token_ids, int): eos_token_ids = set([eos_token_ids]) elif isinstance(eos_token_ids, set): eos_token_ids = eos_token_ids else: raise RuntimeError( f"eos_token_ids is of invalid type {type(eos_token_ids)}, expected int, None or set[int]" ) self.eos_token_ids = eos_token_ids self.stop_sequence_criterias = stop_sequence_criterias self.max_new_tokens = max_new_tokens self.current_tokens = 0 self.current_output = "" self.ignore_eos_token = ignore_eos_token def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]: self.current_tokens += 1 if self.current_tokens >= self.max_new_tokens: return True, FinishReason.FINISH_REASON_LENGTH if isinstance(last_token, torch.Tensor): last_token = last_token.item() if not self.ignore_eos_token and last_token in self.eos_token_ids: return True, FinishReason.FINISH_REASON_EOS_TOKEN if self.stop_sequence_criterias: self.current_output += last_output # There is no need to keep an output that is too long if len(self.current_output) > 300: # Slice to -200 to avoid doing it all the time self.current_output = self.current_output[-200:] for stop_sequence_criteria in self.stop_sequence_criterias: if stop_sequence_criteria(self.current_output): return True, FinishReason.FINISH_REASON_STOP_SEQUENCE return False, None @classmethod def from_pb( cls, pb: generate_pb2.StoppingCriteriaParameters, tokenizer: PreTrainedTokenizerBase, ) -> "StoppingCriteria": stop_sequence_criterias = [ StopSequenceCriteria(sequence) for sequence in pb.stop_sequences ] # TODO Hack because eos_token_id cannot be what we want. 
eos_token_id = getattr(tokenizer, "_eos_token_ids", tokenizer.eos_token_id) return StoppingCriteria( eos_token_id, stop_sequence_criterias, pb.max_new_tokens, pb.ignore_eos_token, ) def create_n_gram_speculation( input_ids: torch.Tensor, next_ids: torch.Tensor, accepted_ids: torch.Tensor, speculate: int, verbose: bool, ): # Very trivial approach, find first match in the string. # This is much less refined than actual n-gram but seems to work # relatively OK in grounded mode and is by far much faster with # much less worst case complexity as everything happens on device. B = accepted_ids.shape[0] device = input_ids.device seeds = next_ids[accepted_ids.cumsum(dim=-1) - 1] indices = (input_ids == seeds.unsqueeze(-1)).max(dim=1).indices + 1 all_indices = indices.unsqueeze(-1).expand(B, speculate) + torch.arange( speculate, device=device ) all_indices = torch.clamp(all_indices, max=input_ids.shape[1] - 1) speculative_ids = input_ids.gather(dim=-1, index=all_indices) return speculative_ids class HeterogeneousNextTokenChooser: def __init__( self, dtype: torch.dtype, device: torch.device, watermark: List[bool], temperature: List[float], repetition_penalty: List[float], frequency_penalty: List[float], top_k: List[int], top_p: List[float], typical_p: List[float], do_sample: List[bool], seeds: List[int], tokenizer: PreTrainedTokenizerBase, grammars: List[str], grammar_types: List[int], fsm_grammar_states=List[int], ): warpers = [] self.watermark_processor = ( HeterogeneousProcessorWrapper( { i: WatermarkLogitsProcessor(device=device) for i, do_watermark in enumerate(watermark) if do_watermark } ) if any(watermark) else None ) self.repetition_processor = ( HeterogeneousRepetitionPenaltyLogitsProcessor( repetition_penalty, dtype, device ) if any([x != 1.0 for x in repetition_penalty]) else None ) self.frequency_processor = ( HeterogeneousFrequencyPenaltyLogitsProcessor( frequency_penalty, dtype, device ) if any([x != 0.0 for x in frequency_penalty]) else None ) 
self.grammar_processor = ( HeterogeneousGrammarLogitProcessor( tokenizer, device, grammars, grammar_types ) if any([grammar != "" for grammar in grammars]) else None ) if any(x != 1.0 for x in temperature): do_sample = [ sample or x != 1.0 for x, sample in zip(temperature, do_sample) ] warpers.append( HeterogeneousTemperatureLogitsWarper(temperature, dtype, device) ) if any(x != 0 for x in top_k): do_sample = [sample or x != 0 for x, sample in zip(top_k, do_sample)] warpers.append(HeterogeneousTopKLogitsWarper(top_k, device)) if any(x < 1.0 for x in top_p): do_sample = [sample or x < 1.0 for x, sample in zip(top_p, do_sample)] warpers.append(HeterogeneousTopPLogitsWarper(top_p, dtype, device)) if any(x < 1.0 for x in typical_p): do_sample = [sample or x < 1.0 for x, sample in zip(typical_p, do_sample)] warpers.append(HeterogeneousTypicalLogitsWarper(typical_p, dtype, device)) self.warpers = warpers if any(do_sample): self.choice = HeterogeneousSampling(do_sample, seeds, device) else: self.choice = Greedy() self.seeds = seeds self.do_sample = do_sample self.dtype = dtype self.device = device self.tokenizer = tokenizer self.fsm_grammar_states = fsm_grammar_states self.grammars = grammars self.grammar_types = grammar_types def __call__( self, input_ids: torch.Tensor, scores: torch.Tensor, speculate: int, speculated_ids: Optional[torch.Tensor] = None, speculative_scores: Optional[torch.Tensor] = None, verbose=False, ): if speculated_ids is not None: B = scores.shape[0] // (speculated_ids.shape[1] + 1) S = speculated_ids.shape[1] + 1 scores = scores.view(B, S, -1) else: B = scores.shape[0] S = 1 scores = scores.view(B, S, -1) next_ids = torch.zeros((B, S), device=scores.device, dtype=torch.long) for j in range(S): _scores = scores[:, j] if self.watermark_processor is not None: _scores = self.watermark_processor(input_ids, _scores) if self.repetition_processor is not None: _scores = self.repetition_processor(input_ids, _scores) if self.frequency_processor is not None: 
_scores = self.frequency_processor(input_ids, _scores) if self.grammar_processor is not None: _scores = self.grammar_processor(_scores, self.fsm_grammar_states) for warper in self.warpers: _scores = warper(input_ids, _scores) _next_ids = self.choice(_scores) scores[:, j] = _scores next_ids[:, j] = _next_ids next_ids = next_ids.view(B * S) allscores = scores.view(B * S, -1) alllogprobs = torch.log_softmax(allscores, -1) if speculated_ids is not None: accepted_ids = [] B = next_ids.shape[0] // (speculated_ids.shape[1] + 1) S = speculated_ids.shape[1] + 1 indices = [] for i in range(B): _next_ids = next_ids[i * S : (i + 1) * S] _speculated_ids = speculated_ids[i] validate_speculative = _next_ids[:-1] == _speculated_ids index = i * S accepted = 1 # First is always valid indices.append(index) for valid in validate_speculative.tolist(): if valid: index += 1 accepted += 1 indices.append(index) else: break accepted_ids.append(accepted) accepted_ids = torch.tensor( accepted_ids, device=input_ids.device, dtype=input_ids.dtype ) next_ids = next_ids[indices] logprobs = alllogprobs[indices] indices = torch.arange(B, device=input_ids.device) * S if speculative_scores is not None: speculative_scores = speculative_scores[indices + accepted_ids - 1] else: accepted_ids = torch.ones_like(next_ids) logprobs = alllogprobs next_logprobs = torch.gather(logprobs, 1, next_ids.view(-1, 1)).view(-1) if speculate > 0: if speculative_scores is not None: # Medusa provided some scores speculative_ids = Greedy()(speculative_scores) else: # n-gram speculative_ids = create_n_gram_speculation( input_ids, next_ids, accepted_ids, speculate, verbose ) else: speculative_ids = None return next_ids, next_logprobs, alllogprobs, accepted_ids, speculative_ids def advance_grammar(self, next_ids: List[int]): if self.grammar_processor is not None: other_new_states = self.grammar_processor.advance_batch( next_ids, self.fsm_grammar_states ) self.fsm_grammar_states = other_new_states return self def 
advance_grammar_single(self, grammar_state_index: int, next_id: int): if self.grammar_processor is not None: self.fsm_grammar_states[grammar_state_index] = ( self.grammar_processor.advance_at_index( next_id, self.fsm_grammar_states[grammar_state_index], grammar_state_index, ) ) return self def filter(self, indices): if self.watermark_processor is not None: self.watermark_processor = self.watermark_processor.filter(indices) if self.repetition_processor is not None: self.repetition_processor = self.repetition_processor.filter(indices) if self.frequency_processor is not None: self.frequency_processor = self.frequency_processor.filter(indices) if self.grammar_processor is not None: self.grammar_processor = self.grammar_processor.filter(indices) filtered_warpers = [] for warper in self.warpers: filtered_warper = warper.filter(indices) if filtered_warper is not None: filtered_warpers.append(filtered_warper) self.warpers = filtered_warpers self.seeds = [self.seeds[i] for i in indices] self.do_sample = [self.do_sample[i] for i in indices] new_grammars = [] new_fsm_grammar_states = [] new_grammar_types = [] for i in indices: new_grammars.append(self.grammars[i]) new_fsm_grammar_states.append(self.fsm_grammar_states[i]) new_grammar_types.append(self.grammar_types[i]) self.grammars = new_grammars self.fsm_grammar_states = new_fsm_grammar_states self.grammar_types = new_grammar_types if any(self.do_sample): self.choice.filter(indices) else: self.choice = Greedy() return self @classmethod def from_pb( cls, pb: List[generate_pb2.NextTokenChooserParameters], dtype: torch.dtype, device: torch.device, tokenizer: PreTrainedTokenizerBase, fsm_grammar_states: Optional[List[int]] = None, ) -> "HeterogeneousNextTokenChooser": return HeterogeneousNextTokenChooser( watermark=[pb_.watermark for pb_ in pb], temperature=[pb_.temperature for pb_ in pb], repetition_penalty=[pb_.repetition_penalty for pb_ in pb], frequency_penalty=[pb_.frequency_penalty for pb_ in pb], top_k=[pb_.top_k for pb_ 
in pb], top_p=[pb_.top_p for pb_ in pb], typical_p=[pb_.typical_p for pb_ in pb], do_sample=[pb_.do_sample for pb_ in pb], seeds=[pb_.seed for pb_ in pb], device=device, dtype=dtype, tokenizer=tokenizer, grammars=[pb_.grammar for pb_ in pb], grammar_types=[pb_.grammar_type for pb_ in pb], fsm_grammar_states=( fsm_grammar_states if fsm_grammar_states else [0] * len(pb) ), ) class Sampling: def __init__(self, seed: int, device: str = "cpu"): self.generator = torch.Generator(device) self.generator.manual_seed(seed) self.seed = seed def __call__(self, logits): probs = torch.nn.functional.softmax(logits, -1) # Avoid GPU<->CPU sync done by torch multinomial # See: https://github.com/pytorch/pytorch/blob/925a3788ec5c06db62ca732a0e9425a26a00916f/aten/src/ATen/native/Distributions.cpp#L631-L637 q = torch.empty_like(probs).exponential_(1, generator=self.generator) return probs.div_(q).argmax() class Greedy: def __call__(self, logits): return logits.argmax(dim=-1) class HeterogeneousSampling: r""" Mixed greedy and probabilistic sampling. Compute both and pick the right one for each sample. 
""" def __init__(self, do_sample: List[bool], seeds: List[int], device: torch.device): self.seeds = seeds self.greedy_indices = [] self.sampling_mapping = {} for i, (sample, seed) in enumerate(zip(do_sample, seeds)): if sample: self.sampling_mapping[i] = Sampling(seed, device) else: self.greedy_indices.append(i) self.greedy = Greedy() def __call__(self, logits): out = torch.empty(logits.shape[0], dtype=torch.int64, device=logits.device) if self.greedy_indices: # Computing for all indices is faster than slicing torch.argmax(logits, -1, out=out) for i, sampling in self.sampling_mapping.items(): out[i] = sampling(logits[i]) return out def filter(self, indices): new_greedy_indices = [] new_sampling_mapping = {} for i, idx in enumerate(indices): if idx in self.sampling_mapping: new_sampling_mapping[i] = self.sampling_mapping[idx] else: new_greedy_indices.append(i) self.greedy_indices = new_greedy_indices self.sampling_mapping = new_sampling_mapping return self def batch_top_tokens( top_n_tokens: List[int], top_n_tokens_tensor: torch.Tensor, logprobs: torch.Tensor, accepted_ids: torch.Tensor, ) -> Tuple[List[List[List[int]]], List[List[List[float]]]]: """Find the top n most likely tokens for a batch of generations. When multiple tokens have equal probabilities and they don't all fit, the remaining tokens are also returned. 
""" max_top_n = max(top_n_tokens) # Early exit when top_n_tokens is not used if max_top_n == 0: return [[[]]] * len(top_n_tokens), [[[]]] * len(top_n_tokens) batch_size = accepted_ids.shape[0] speculate_size = logprobs.shape[0] // batch_size top_n_tokens_tensor = top_n_tokens_tensor.repeat_interleave(speculate_size) # Ensure top_n doesn't exceed vocab size top_n_tokens = [ min(tok, logprobs.size(-1)) for tok in top_n_tokens for _ in range(speculate_size) ] # Parallel kthvalue adapted from https://discuss.pytorch.org/t/how-to-efficiently-get-the-k-th-largest-values-in-parallel/160529/2 # Sorted topk is faster than torch.sort() since we only need a small subset sorted_top_k = torch.topk(logprobs, k=max_top_n, dim=-1, sorted=True).values nth_highest = torch.gather( sorted_top_k, 1, (top_n_tokens_tensor - 1).clip(min=0).unsqueeze(1) ) nth_highest[nth_highest == -float("inf")] = torch.finfo(logprobs.dtype).min # Find the new "fuzzy" top n values top_n_indices = (logprobs >= nth_highest).nonzero() _, top_n_ishes = torch.unique_consecutive(top_n_indices[:, 0], return_counts=True) k = 1 if top_n_ishes.numel() == 0 else top_n_ishes.max() # Take a new topk for these new max n values top_k = torch.topk(logprobs, k=k, dim=1, sorted=True) top_n_ishes = top_n_ishes.tolist() top_indices = top_k.indices.tolist() top_values = top_k.values.tolist() batch_top_token_ids = [] batch_top_token_logprobs = [] accepted_ids_list = accepted_ids.tolist() for i, n_accepted_ids in enumerate(accepted_ids_list): start = speculate_size * i stop = speculate_size * (i + 1) _top_indices = top_indices[start:stop] _top_values = top_values[start:stop] _top_n_ishes = top_n_ishes[start:stop] _top_n_tokens = top_n_tokens[start:stop] _top_indices = _top_indices[:n_accepted_ids] _top_values = _top_values[:n_accepted_ids] _top_n_ishes = _top_n_ishes[:n_accepted_ids] _top_n_tokens = _top_n_tokens[:n_accepted_ids] row_top_token_ids = [] row_top_token_logprobs = [] for idxs, vals, n, req_n in zip( _top_indices, 
_top_values, _top_n_ishes, _top_n_tokens ): indices = idxs[:n] if req_n > 0 else [] values = vals[:n] if req_n > 0 else [] row_top_token_ids.append(indices) row_top_token_logprobs.append(values) batch_top_token_ids.append(row_top_token_ids) batch_top_token_logprobs.append(row_top_token_logprobs) return batch_top_token_ids, batch_top_token_logprobs
text-generation-inference/server/text_generation_server/utils/tokens.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/tokens.py", "repo_id": "text-generation-inference", "token_count": 11317 }
262
[package] authors = ["Nicolas Patry <nicolas@huggingface.co>"] edition = "2021" name = "node" version = "0.20.0-dev.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [lib] crate-type = ["cdylib"] [dependencies] napi = "2" napi-derive = "2" serde = { version = "1.0.163", features = ["derive"] } tokenizers = { path = "../../tokenizers/" } [build-dependencies] napi-build = "2" [profile.release] lto = true
tokenizers/bindings/node/Cargo.toml/0
{ "file_path": "tokenizers/bindings/node/Cargo.toml", "repo_id": "tokenizers", "token_count": 200 }
263
import { prependNormalizer, stripAccentsNormalizer, stripNormalizer } from '../../' describe('stripNormalizer', () => { it('instantiates with no parameters', () => { const normalizer = stripNormalizer() expect(normalizer.constructor.name).toEqual('Normalizer') }) it('accepts `undefined` as first parameter', () => { expect(stripNormalizer(undefined)).toBeDefined() }) it('accepts `undefined` as second parameter', () => { expect(stripNormalizer(false, undefined)).toBeDefined() }) it('instantiates with one parameter', () => { const normalizer = stripNormalizer(false) expect(normalizer.constructor.name).toEqual('Normalizer') }) it('instantiates with two parameters', () => { const normalizer = stripNormalizer(false, true) expect(normalizer.constructor.name).toEqual('Normalizer') }) it('prepend instantiates with one parameter', () => { const normalizer = prependNormalizer('_') expect(normalizer.constructor.name).toEqual('Normalizer') expect(normalizer.normalizeString('Hello')).toEqual('_Hello') }) it('can normalize strings', () => { const normalizer = stripNormalizer() expect(normalizer.normalizeString(' Hello there ')).toEqual('Hello there') }) }) describe('stripAccentsNormalizer', () => { it('initialize', () => { const normalizer = stripAccentsNormalizer() expect(normalizer.constructor.name).toEqual('Normalizer') }) })
tokenizers/bindings/node/lib/bindings/normalizers.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/normalizers.test.ts", "repo_id": "tokenizers", "token_count": 468 }
264
{ "name": "tokenizers-linux-arm-gnueabihf", "version": "0.13.4-rc1", "os": [ "linux" ], "cpu": [ "arm" ], "main": "tokenizers.linux-arm-gnueabihf.node", "files": [ "tokenizers.linux-arm-gnueabihf.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers" }
tokenizers/bindings/node/npm/linux-arm-gnueabihf/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm-gnueabihf/package.json", "repo_id": "tokenizers", "token_count": 278 }
265
tab_spaces = 2
tokenizers/bindings/node/rustfmt.toml/0
{ "file_path": "tokenizers/bindings/node/rustfmt.toml", "repo_id": "tokenizers", "token_count": 7 }
266
export type TextInputSequence = string export type PreTokenizedInputSequence = string[] export type InputSequence = TextInputSequence | PreTokenizedInputSequence export type TextEncodeInput = TextInputSequence | [TextInputSequence, TextInputSequence] export type PreTokenizedEncodeInput = PreTokenizedInputSequence | [PreTokenizedInputSequence, PreTokenizedInputSequence] export type EncodeInput = TextEncodeInput | PreTokenizedEncodeInput
tokenizers/bindings/node/types.ts/0
{ "file_path": "tokenizers/bindings/node/types.ts", "repo_id": "tokenizers", "token_count": 114 }
267
<jupyter_start><jupyter_code>!wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt -O /tmp/bert-base-uncased-vocab.txt from tokenizers import BertWordPieceTokenizer from tokenizers.tools import EncodingVisualizer EncodingVisualizer.unk_token_regex.search("aaa[udsnk]aaa") text = """Mathias Bynens 'Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘!͖̬̰̙̗̿̋ͥͥ̂ͣ̐́́͜͞': Whenever you’re working on a piece of JavaScript code that deals with strings or regular expressions in some way, just add a unit test that contains a pile of poo (💩) in a string, 💩💩💩💩💩💩💩💩💩💩💩💩 and see if anything breaks. It’s a quick, fun, and easy way to see if your code supports astral symbols. Once you’ve found a Unicode-related bug in your code, all you need to do is apply the techniques discussed in this post to fix it.""" tokenizer = BertWordPieceTokenizer("/tmp/bert-base-uncased-vocab.txt", lowercase=True) visualizer = EncodingVisualizer(tokenizer=tokenizer)<jupyter_output><empty_output><jupyter_text>Visualizing Tokens With No Annotations<jupyter_code>visualizer(text)<jupyter_output><empty_output><jupyter_text>Visualizing Tokens With Aligned AnnotationsFirst we make some annotations with the Annotation class<jupyter_code>from tokenizers.tools import Annotation anno1 = Annotation(start=0, end=2, label="foo") anno2 = Annotation(start=2, end=4, label="bar") anno3 = Annotation(start=6, end=8, label="poo") anno4 = Annotation(start=9, end=12, label="shoe") annotations=[ anno1, anno2, anno3, anno4, Annotation(start=23, end=30, label="random tandem bandem sandem landem fandom"), Annotation(start=63, end=70, label="foo"), Annotation(start=80, end=95, label="bar"), Annotation(start=120, end=128, label="bar"), Annotation(start=152, end=155, label="poo"), ] visualizer(text,annotations=annotations)<jupyter_output><empty_output><jupyter_text>Using A Custom Annotation FormatEvery system has its own representation of annotations. 
That's why we can instantiate the EncodingVisualizer with a convertion function.<jupyter_code>funnyAnnotations = [dict(startPlace=i,endPlace=i+3,theTag=str(i)) for i in range(0,20,4)] funnyAnnotations converter = lambda funny: Annotation(start=funny['startPlace'], end=funny['endPlace'], label=funny['theTag']) visualizer = EncodingVisualizer(tokenizer=tokenizer, default_to_notebook=True, annotation_converter=converter) visualizer(text, annotations=funnyAnnotations)<jupyter_output><empty_output><jupyter_text>Trying with Roberta<jupyter_code>!wget "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json" -O /tmp/roberta-base-vocab.json !wget "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt" -O /tmp/roberta-base-merges.txt from tokenizers import ByteLevelBPETokenizer roberta_tokenizer = ByteLevelBPETokenizer.from_file('/tmp/roberta-base-vocab.json', '/tmp/roberta-base-merges.txt') roberta_visualizer = EncodingVisualizer(tokenizer=roberta_tokenizer, default_to_notebook=True) roberta_visualizer(text, annotations=annotations)<jupyter_output><empty_output>
tokenizers/bindings/python/examples/using_the_visualizer.ipynb/0
{ "file_path": "tokenizers/bindings/python/examples/using_the_visualizer.ipynb", "repo_id": "tokenizers", "token_count": 1221 }
268
# Generated content DO NOT EDIT from .. import pre_tokenizers PreTokenizer = pre_tokenizers.PreTokenizer BertPreTokenizer = pre_tokenizers.BertPreTokenizer ByteLevel = pre_tokenizers.ByteLevel CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit Digits = pre_tokenizers.Digits Metaspace = pre_tokenizers.Metaspace Punctuation = pre_tokenizers.Punctuation Sequence = pre_tokenizers.Sequence Split = pre_tokenizers.Split UnicodeScripts = pre_tokenizers.UnicodeScripts Whitespace = pre_tokenizers.Whitespace WhitespaceSplit = pre_tokenizers.WhitespaceSplit
tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py", "repo_id": "tokenizers", "token_count": 177 }
269
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::tokenizer::{Offsets, PaddingDirection}; use tk::utils::truncation::TruncationDirection; use tokenizers as tk; use crate::error::{deprecation_warning, PyError}; /// The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. #[pyclass(dict, module = "tokenizers", name = "Encoding")] #[repr(transparent)] pub struct PyEncoding { pub encoding: tk::tokenizer::Encoding, } impl From<tk::tokenizer::Encoding> for PyEncoding { fn from(v: tk::tokenizer::Encoding) -> Self { Self { encoding: v } } } #[pymethods] impl PyEncoding { #[new] #[pyo3(text_signature = None)] fn new() -> Self { Self { encoding: tk::tokenizer::Encoding::default(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.encoding).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Encoding: {}", e )) })?; Ok(PyBytes::new_bound(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.encoding = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Encoding: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __repr__(&self) -> PyResult<String> { Ok(format!( "Encoding(num_tokens={}, attributes=[ids, type_ids, tokens, offsets, \ attention_mask, special_tokens_mask, overflowing])", self.encoding.get_ids().len() )) } fn __len__(&self) -> PyResult<usize> { Ok(self.encoding.len()) } /// Merge the list of encodings into one final :class:`~tokenizers.Encoding` /// /// Args: /// encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): /// The list of encodings that should be merged in one /// /// growing_offsets (:obj:`bool`, defaults to :obj:`True`): /// Whether the offsets should accumulate while merging /// /// Returns: /// 
:class:`~tokenizers.Encoding`: The resulting Encoding #[staticmethod] #[pyo3(signature = (encodings, growing_offsets = true))] #[pyo3(text_signature = "(encodings, growing_offsets=True)")] fn merge(encodings: Vec<PyRef<PyEncoding>>, growing_offsets: bool) -> PyEncoding { tk::tokenizer::Encoding::merge( encodings.into_iter().map(|e| e.encoding.clone()), growing_offsets, ) .into() } /// The number of sequences represented /// /// Returns: /// :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` #[getter] fn get_n_sequences(&self) -> usize { self.encoding.n_sequences() } /// Set the given sequence index /// /// Set the given sequence index for the whole range of tokens contained in this /// :class:`~tokenizers.Encoding`. #[pyo3(text_signature = "(self, sequence_id)")] fn set_sequence_id(&mut self, sequence_id: usize) { self.encoding.set_sequence_id(sequence_id); } /// The generated IDs /// /// The IDs are the main input to a Language Model. They are the token indices, /// the numerical representations that a LM understands. /// /// Returns: /// :obj:`List[int]`: The list of IDs #[getter] fn get_ids(&self) -> Vec<u32> { self.encoding.get_ids().to_vec() } /// The generated tokens /// /// They are the string representation of the IDs. /// /// Returns: /// :obj:`List[str]`: The list of tokens #[getter] fn get_tokens(&self) -> Vec<String> { self.encoding.get_tokens().to_vec() } /// The generated word indices. /// /// .. warning:: /// This is deprecated and will be removed in a future version. /// Please use :obj:`~tokenizers.Encoding.word_ids` instead. /// /// They represent the index of the word associated to each token. /// When the input is pre-tokenized, they correspond to the ID of the given input label, /// otherwise they correspond to the words indices as defined by the /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. 
/// /// For special tokens and such (any token that was generated from something that was /// not part of the input), the output is :obj:`None` /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. #[getter] fn get_words(&self, py: Python<'_>) -> PyResult<Vec<Option<u32>>> { deprecation_warning( py, "0.9.4", "Encoding.words is deprecated, please use Encoding.word_ids instead.", )?; Ok(self.get_word_ids()) } /// The generated word indices. /// /// They represent the index of the word associated to each token. /// When the input is pre-tokenized, they correspond to the ID of the given input label, /// otherwise they correspond to the words indices as defined by the /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. /// /// For special tokens and such (any token that was generated from something that was /// not part of the input), the output is :obj:`None` /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. #[getter] fn get_word_ids(&self) -> Vec<Option<u32>> { self.encoding.get_word_ids().to_vec() } /// The generated sequence indices. /// /// They represent the index of the input sequence associated to each token. /// The sequence id can be None if the token is not related to any input sequence, /// like for example with special tokens. /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. #[getter] fn get_sequence_ids(&self) -> Vec<Option<usize>> { self.encoding.get_sequence_ids() } /// The generated type IDs /// /// Generally used for tasks like sequence classification or question answering, /// these tokens let the LM know which input sequence corresponds to each tokens. 
/// /// Returns: /// :obj:`List[int]`: The list of type ids #[getter] fn get_type_ids(&self) -> Vec<u32> { self.encoding.get_type_ids().to_vec() } /// The offsets associated to each token /// /// These offsets let's you slice the input string, and thus retrieve the original /// part that led to producing the corresponding token. /// /// Returns: /// A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets #[getter] fn get_offsets(&self) -> Vec<(usize, usize)> { self.encoding.get_offsets().to_vec() } /// The special token mask /// /// This indicates which tokens are special tokens, and which are not. /// /// Returns: /// :obj:`List[int]`: The special tokens mask #[getter] fn get_special_tokens_mask(&self) -> Vec<u32> { self.encoding.get_special_tokens_mask().to_vec() } /// The attention mask /// /// This indicates to the LM which tokens should be attended to, and which should not. /// This is especially important when batching sequences, where we need to applying /// padding. /// /// Returns: /// :obj:`List[int]`: The attention mask #[getter] fn get_attention_mask(&self) -> Vec<u32> { self.encoding.get_attention_mask().to_vec() } /// A :obj:`List` of overflowing :class:`~tokenizers.Encoding` /// /// When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting /// the output into as many pieces as required to match the specified maximum length. /// This field lets you retrieve all the subsequent pieces. /// /// When you use pairs of sequences, the overflowing pieces will contain enough /// variations to cover all the possible combinations, while respecting the provided /// maximum length. #[getter] fn get_overflowing(&self) -> Vec<PyEncoding> { self.encoding .get_overflowing() .clone() .into_iter() .map(|e| e.into()) .collect() } /// Get the encoded tokens corresponding to the word at the given index /// in one of the input sequences. /// /// Args: /// word_index (:obj:`int`): /// The index of a word in one of the input sequences. 
/// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target word /// /// Returns: /// :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` #[pyo3(signature = (word_index, sequence_index = 0))] #[pyo3(text_signature = "(self, word_index, sequence_index=0)")] fn word_to_tokens(&self, word_index: u32, sequence_index: usize) -> Option<(usize, usize)> { self.encoding.word_to_tokens(word_index, sequence_index) } /// Get the offsets of the word at the given index in one of the input sequences. /// /// Args: /// word_index (:obj:`int`): /// The index of a word in one of the input sequences. /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target word /// /// Returns: /// :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` #[pyo3(signature = (word_index, sequence_index = 0))] #[pyo3(text_signature = "(self, word_index, sequence_index=0)")] fn word_to_chars(&self, word_index: u32, sequence_index: usize) -> Option<Offsets> { self.encoding.word_to_chars(word_index, sequence_index) } /// Get the index of the sequence represented by the given token. /// /// In the general use case, this method returns :obj:`0` for a single sequence or /// the first sequence of a pair, and :obj:`1` for the second sequence of a pair /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`int`: The sequence id of the given token #[pyo3(text_signature = "(self, token_index)")] fn token_to_sequence(&self, token_index: usize) -> Option<usize> { self.encoding.token_to_sequence(token_index) } /// Get the offsets of the token at the given index. /// /// The returned offsets are related to the input sequence that contains the /// token. In order to determine in which input sequence it belongs, you /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`. 
/// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` #[pyo3(text_signature = "(self, token_index)")] fn token_to_chars(&self, token_index: usize) -> Option<Offsets> { let (_, offsets) = self.encoding.token_to_chars(token_index)?; Some(offsets) } /// Get the index of the word that contains the token in one of the input sequences. /// /// The returned word index is related to the input sequence that contains /// the token. In order to determine in which input sequence it belongs, you /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`. /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`int`: The index of the word in the relevant input sequence. #[pyo3(text_signature = "(self, token_index)")] fn token_to_word(&self, token_index: usize) -> Option<u32> { let (_, word_idx) = self.encoding.token_to_word(token_index)?; Some(word_idx) } /// Get the token that contains the char at the given position in the input sequence. /// /// Args: /// char_pos (:obj:`int`): /// The position of a char in the input string /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target char /// /// Returns: /// :obj:`int`: The index of the token that contains this char in the encoded sequence #[pyo3(signature = (char_pos, sequence_index = 0))] #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")] fn char_to_token(&self, char_pos: usize, sequence_index: usize) -> Option<usize> { self.encoding.char_to_token(char_pos, sequence_index) } /// Get the word that contains the char at the given position in the input sequence. 
/// /// Args: /// char_pos (:obj:`int`): /// The position of a char in the input string /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target char /// /// Returns: /// :obj:`int`: The index of the word that contains this char in the input sequence #[pyo3(signature = (char_pos, sequence_index = 0))] #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")] fn char_to_word(&self, char_pos: usize, sequence_index: usize) -> Option<u32> { self.encoding.char_to_word(char_pos, sequence_index) } /// Pad the :class:`~tokenizers.Encoding` at the given length /// /// Args: /// length (:obj:`int`): /// The desired length /// /// direction: (:obj:`str`, defaults to :obj:`right`): /// The expected padding direction. Can be either :obj:`right` or :obj:`left` /// /// pad_id (:obj:`int`, defaults to :obj:`0`): /// The ID corresponding to the padding token /// /// pad_type_id (:obj:`int`, defaults to :obj:`0`): /// The type ID corresponding to the padding token /// /// pad_token (:obj:`str`, defaults to `[PAD]`): /// The pad token to use #[pyo3(signature = (length, **kwargs))] #[pyo3( text_signature = "(self, length, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]')" )] fn pad(&mut self, length: usize, kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<()> { let mut pad_id = 0; let mut pad_type_id = 0; let mut pad_token = "[PAD]".to_string(); let mut direction = PaddingDirection::Right; if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "direction" => { let value: &str = value.extract()?; direction = match value { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), other => Err(PyError(format!( "Unknown `direction`: `{}`. 
Use \ one of `left` or `right`", other )) .into_pyerr::<exceptions::PyValueError>()), }?; } "pad_id" => pad_id = value.extract()?, "pad_type_id" => pad_type_id = value.extract()?, "pad_token" => pad_token = value.extract()?, _ => println!("Ignored unknown kwarg option {}", key), } } } self.encoding .pad(length, pad_id, pad_type_id, &pad_token, direction); Ok(()) } /// Truncate the :class:`~tokenizers.Encoding` at the given length /// /// If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating /// this information is lost. It will be considered as representing a single sequence. /// /// Args: /// max_length (:obj:`int`): /// The desired length /// /// stride (:obj:`int`, defaults to :obj:`0`): /// The length of previous content to be included in each overflowing piece /// /// direction (:obj:`str`, defaults to :obj:`right`): /// Truncate direction #[pyo3(signature = (max_length, stride = 0, direction = "right"))] #[pyo3(text_signature = "(self, max_length, stride=0, direction='right')")] fn truncate(&mut self, max_length: usize, stride: usize, direction: &str) -> PyResult<()> { let tdir = match direction { "left" => Ok(TruncationDirection::Left), "right" => Ok(TruncationDirection::Right), _ => Err(PyError(format!( "Invalid truncation direction value : {}", direction )) .into_pyerr::<exceptions::PyValueError>()), }?; self.encoding.truncate(max_length, stride, tdir); Ok(()) } }
tokenizers/bindings/python/src/encoding.rs/0
{ "file_path": "tokenizers/bindings/python/src/encoding.rs", "repo_id": "tokenizers", "token_count": 7410 }
270
"""Generate ``.pyi`` stubs (and ``__init__.py`` re-export shims) for the native
`tokenizers` extension module by introspecting its docstrings and text signatures.

Run ``python stub.py`` to (re)write the files, or ``python stub.py --check`` to
verify that the committed files are up to date (used in CI).
"""

import argparse
import inspect
import os
import subprocess

INDENT = " " * 4
GENERATED_COMMENT = "# Generated content DO NOT EDIT\n"


def do_indent(text: str, indent: str):
    """Prefix every line of ``text`` after the first with ``indent``."""
    return text.replace("\n", f"\n{indent}")


def function(obj, indent, text_signature=None):
    """Render a stub ``def`` (signature + docstring + ``pass``) for a callable."""
    if text_signature is None:
        text_signature = obj.__text_signature__

    string = ""
    string += f"{indent}def {obj.__name__}{text_signature}:\n"
    indent += INDENT
    string += f'{indent}"""\n'
    string += f"{indent}{do_indent(obj.__doc__, indent)}\n"
    string += f'{indent}"""\n'
    string += f"{indent}pass\n"
    string += "\n"
    string += "\n"
    return string


def member_sort(member):
    """Sort key placing classes after plain members, deepest hierarchies last."""
    if inspect.isclass(member):
        value = 10 + len(inspect.getmro(member))
    else:
        value = 1
    return value


def fn_predicate(obj):
    """Keep public, documented methods/builtins with a text signature, and getset descriptors."""
    value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
    if value:
        return obj.__doc__ and obj.__text_signature__ and not obj.__name__.startswith("_")
    if inspect.isgetsetdescriptor(obj):
        return obj.__doc__ and not obj.__name__.startswith("_")
    return False


def get_module_members(module):
    """Return the public, non-module members of ``module``, ordered by `member_sort`."""
    members = [
        member
        for name, member in inspect.getmembers(module)
        if not name.startswith("_") and not inspect.ismodule(member)
    ]
    members.sort(key=member_sort)
    return members


def pyi_file(obj, indent=""):
    """Recursively render the ``.pyi`` stub text for a module, class or callable."""
    string = ""
    if inspect.ismodule(obj):
        string += GENERATED_COMMENT
        members = get_module_members(obj)
        for member in members:
            string += pyi_file(member, indent)
    elif inspect.isclass(obj):
        indent += INDENT
        mro = inspect.getmro(obj)
        # Only show a base class when there is a real one beyond `object`.
        if len(mro) > 2:
            inherit = f"({mro[1].__name__})"
        else:
            inherit = ""
        string += f"class {obj.__name__}{inherit}:\n"

        body = ""
        if obj.__doc__:
            body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'

        fns = inspect.getmembers(obj, fn_predicate)

        # Init
        if obj.__text_signature__:
            body += f"{indent}def __init__{obj.__text_signature__}:\n"
            body += f"{indent+INDENT}pass\n"
            body += "\n"

        for name, fn in fns:
            body += pyi_file(fn, indent=indent)

        if not body:
            body += f"{indent}pass\n"

        string += body
        string += "\n\n"
    elif inspect.isbuiltin(obj):
        string += f"{indent}@staticmethod\n"
        string += function(obj, indent)
    elif inspect.ismethoddescriptor(obj):
        string += function(obj, indent)
    elif inspect.isgetsetdescriptor(obj):
        # TODO it would be interesing to add the setter maybe ?
        string += f"{indent}@property\n"
        string += function(obj, indent, text_signature="(self)")
    else:
        raise Exception(f"Object {obj} is not supported")
    return string


def py_file(module, origin):
    """Render a pure-Python ``__init__.py`` re-exporting ``module``'s members from ``origin``."""
    members = get_module_members(module)

    string = GENERATED_COMMENT
    string += f"from .. import {origin}\n"
    string += "\n"
    for member in members:
        name = member.__name__
        string += f"{name} = {origin}.{name}\n"
    return string


def do_ruff(code, is_pyi: bool):
    """Format ``code`` with ruff (reading from stdin) and return the formatted text."""
    command = ["ruff", "format", "--config", "pyproject.toml", "--silent", "-"]
    if is_pyi:
        command.extend(["--stdin-filename", "test.pyi"])
    # Exit status is deliberately not checked: mirror ruff's stdout either way,
    # exactly like the previous Popen/communicate implementation did.
    result = subprocess.run(command, input=code.encode("utf-8"), capture_output=True)
    return result.stdout.decode("utf-8")


def write(module, directory, origin, check=False):
    """Write (or, with ``check=True``, verify) the stub files for ``module``.

    Recurses into submodules. ``__init__.py`` is only (re)written when it is
    missing or was previously auto-generated (starts with GENERATED_COMMENT).
    """
    submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)]

    filename = os.path.join(directory, "__init__.pyi")
    pyi_content = pyi_file(module)
    pyi_content = do_ruff(pyi_content, is_pyi=True)
    os.makedirs(directory, exist_ok=True)
    if check:
        with open(filename, "r") as f:
            data = f.read()
            # Interpolate the actual path so a mismatch points at the stale file.
            assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`"
    else:
        with open(filename, "w") as f:
            f.write(pyi_content)

    filename = os.path.join(directory, "__init__.py")
    py_content = py_file(module, origin)
    py_content = do_ruff(py_content, is_pyi=False)
    os.makedirs(directory, exist_ok=True)

    is_auto = False
    if not os.path.exists(filename):
        is_auto = True
    else:
        with open(filename, "r") as f:
            line = f.readline()
            if line == GENERATED_COMMENT:
                is_auto = True

    if is_auto:
        if check:
            with open(filename, "r") as f:
                data = f.read()
                assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`"
        else:
            with open(filename, "w") as f:
                f.write(py_content)

    for name, submodule in submodules:
        write(submodule, os.path.join(directory, name), f"{name}", check=check)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check", action="store_true")

    args = parser.parse_args()

    import tokenizers

    write(tokenizers.tokenizers, "py_src/tokenizers/", "tokenizers", check=args.check)
tokenizers/bindings/python/stub.py/0
{ "file_path": "tokenizers/bindings/python/stub.py", "repo_id": "tokenizers", "token_count": 2395 }
271
# Models <tokenizerslangcontent> <python> ## BPE [[autodoc]] tokenizers.models.BPE ## Model [[autodoc]] tokenizers.models.Model ## Unigram [[autodoc]] tokenizers.models.Unigram ## WordLevel [[autodoc]] tokenizers.models.WordLevel ## WordPiece [[autodoc]] tokenizers.models.WordPiece </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/models.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/models.mdx", "repo_id": "tokenizers", "token_count": 179 }
272
Installation with npm ---------------------------------------------------------------------------------------------------- You can simply install 🤗 Tokenizers with npm using:: npm install tokenizers
tokenizers/docs/source/installation/node.inc/0
{ "file_path": "tokenizers/docs/source/installation/node.inc", "repo_id": "tokenizers", "token_count": 31 }
273
#[macro_use]
extern crate criterion;

use criterion::{Criterion, Throughput};
use tokenizers::Tokenizer;

/// Benchmarks Llama 3 BPE encoding over `data/big.txt`, line by line, both
/// with and without character-offset tracking.
pub fn llama3(c: &mut Criterion) {
    let data = std::fs::read_to_string("data/big.txt").unwrap();
    let mut group = c.benchmark_group("llama3-encode");
    // Report throughput in input bytes/second. `String::len` already *is* the
    // byte count — no need to materialize a `bytes()` iterator just to measure it.
    group.throughput(Throughput::Bytes(data.len() as u64));
    group.bench_function("llama3-offsets", |b| {
        let tokenizer =
            Tokenizer::from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct", None).unwrap();
        let data: Vec<_> = data.lines().collect();
        let add_special_tokens = false;
        b.iter(|| {
            tokenizer
                .encode_batch_char_offsets(criterion::black_box(data.clone()), add_special_tokens)
                .unwrap()
        })
    });
    group.bench_function("llama3-nooffsets", |b| {
        let tokenizer =
            Tokenizer::from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct", None).unwrap();
        let data: Vec<_> = data.lines().collect();
        let add_special_tokens = false;
        b.iter(|| {
            tokenizer
                .encode_batch(criterion::black_box(data.clone()), add_special_tokens)
                .unwrap()
        })
    });
    group.finish();
}

criterion_group! {
    // Renamed from the copy-pasted `bert_benches` so the group matches this file.
    name = llama3_benches;
    config = Criterion::default().sample_size(10);
    targets = llama3
}
criterion_main!(llama3_benches);
tokenizers/tokenizers/benches/llama3.rs/0
{ "file_path": "tokenizers/tokenizers/benches/llama3.rs", "repo_id": "tokenizers", "token_count": 645 }
274
// A dependency graph that contains any wasm must all be imported
// asynchronously. This `bootstrap.js` file does the single async import, so
// that no one else needs to worry about it again.
import("./index.js").catch(function (err) {
  console.error("Error importing `index.js`:", err);
});
tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js", "repo_id": "tokenizers", "token_count": 79 }
275
//! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
use std::{iter, mem};

mod model;
mod serialization;
pub mod trainer;
mod word;

/// A merge pair: the ids of the two tokens that are merged together.
type Pair = (u32, u32);

/// Errors that can be encountered while using or constructing a `BPE` model.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// An error encountered while reading files mainly.
    #[error("IoError: {0}")]
    Io(#[from] std::io::Error),

    /// An error forwarded from Serde, while parsing JSON
    #[error("JsonError: {0}")]
    JsonError(#[from] serde_json::Error),

    /// When the vocab.json file is in the wrong format
    #[error("Bad vocabulary json file")]
    BadVocabulary,

    /// When the merges.txt file is in the wrong format. This error holds the line
    /// number of the line that caused the error.
    #[error("Merges text file invalid at line {0}")]
    BadMerges(usize),

    /// If a token found in merges, is not in the vocab
    #[error("Token `{0}` out of vocabulary")]
    MergeTokenOutOfVocabulary(String),

    /// If the provided unk token is out of vocabulary
    #[error("Unk token `{0}` not found in the vocabulary")]
    UnkTokenOutOfVocabulary(String),

    /// Dropout not between 0 and 1.
    #[error("Dropout should be between 0 and 1, inclusive")]
    InvalidDropout,
}

/// Provides access to the `FirstLastIterator` to any Iterator
pub(crate) trait WithFirstLastIterator: Iterator + Sized {
    fn with_first_and_last(self) -> FirstLastIterator<Self>;
}

impl<I> WithFirstLastIterator for I
where
    I: Iterator,
{
    fn with_first_and_last(self) -> FirstLastIterator<Self> {
        FirstLastIterator {
            // Flipped to `false` after the first call to `next`.
            first: true,
            // Peekable so we can look one item ahead to detect the last element.
            iter: self.peekable(),
        }
    }
}

/// Provides information about whether an item is the first and/or the last of the iterator
pub(crate) struct FirstLastIterator<I>
where
    I: Iterator,
{
    first: bool,
    iter: iter::Peekable<I>,
}

impl<I> Iterator for FirstLastIterator<I>
where
    I: Iterator,
{
    /// (is_first, is_last, item)
    type Item = (bool, bool, I::Item);

    fn next(&mut self) -> Option<Self::Item> {
        // `mem::replace` yields the previous flag value, so `first` is `true`
        // exactly once: for the very first item produced.
        let first = mem::replace(&mut self.first, false);
        self.iter
            .next()
            // If nothing can be peeked, the current item is the last one.
            .map(|e| (first, self.iter.peek().is_none(), e))
    }
}

// Re-export
pub use model::*;
pub use trainer::*;
use word::*;
tokenizers/tokenizers/src/models/bpe/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/bpe/mod.rs", "repo_id": "tokenizers", "token_count": 893 }
276
use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder};
use serde::{
    de::{MapAccess, Visitor},
    ser::SerializeStruct,
    Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashSet;

impl Serialize for WordPiece {
    /// Serialize as a tagged struct: `{"type": "WordPiece", ...}`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut model = serializer.serialize_struct("WordPiece", 5)?;

        // Small fields first
        model.serialize_field("type", "WordPiece")?;
        model.serialize_field("unk_token", &self.unk_token)?;
        model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?;
        model.serialize_field("max_input_chars_per_word", &self.max_input_chars_per_word)?;

        // Then large ones
        // Wrap `vocab_r` (id -> token) so the vocab is emitted ordered by id.
        let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
        model.serialize_field("vocab", &ordered_vocab)?;

        model.end()
    }
}

impl<'de> Deserialize<'de> for WordPiece {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_struct(
            "WordPiece",
            &[
                "type",
                "unk_token",
                "continuing_subword_prefix",
                "max_input_chars_per_word",
                "vocab",
            ],
            WordPieceVisitor,
        )
    }
}

/// Map visitor that rebuilds a `WordPiece` through its builder, tracking which
/// mandatory fields have been seen so a precise `missing_field` error can be raised.
struct WordPieceVisitor;
impl<'de> Visitor<'de> for WordPieceVisitor {
    type Value = WordPiece;

    fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(fmt, "struct WordPiece")
    }

    fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
    where
        V: MapAccess<'de>,
    {
        let mut builder = WordPieceBuilder::new();
        let mut missing_fields = vec![
            // for retrocompatibility the "type" field is not mandatory
            "unk_token",
            "continuing_subword_prefix",
            "max_input_chars_per_word",
            "vocab",
        ]
        .into_iter()
        .collect::<HashSet<_>>();
        while let Some(key) = map.next_key::<String>()? {
            match key.as_ref() {
                "unk_token" => builder = builder.unk_token(map.next_value()?),
                "continuing_subword_prefix" => {
                    builder = builder.continuing_subword_prefix(map.next_value()?)
                }
                "max_input_chars_per_word" => {
                    builder = builder.max_input_chars_per_word(map.next_value()?)
                }
                "vocab" => builder = builder.vocab(map.next_value()?),
                // When present, the tag must be exactly "WordPiece".
                "type" => match map.next_value()? {
                    "WordPiece" => {}
                    u => {
                        return Err(serde::de::Error::invalid_value(
                            serde::de::Unexpected::Str(u),
                            &"WordPiece",
                        ))
                    }
                },
                // Unknown keys are ignored for forward compatibility.
                _ => {}
            }
            missing_fields.remove::<str>(&key);
        }
        if !missing_fields.is_empty() {
            // Report one of the still-missing mandatory fields.
            Err(serde::de::Error::missing_field(
                missing_fields.iter().next().unwrap(),
            ))
        } else {
            Ok(builder.build().map_err(serde::de::Error::custom)?)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn serde() {
        // Round-trip a default model and pin the exact serialized form.
        let wp = WordPiece::default();
        let wp_s = "{\
            \"type\":\"WordPiece\",\
            \"unk_token\":\"[UNK]\",\
            \"continuing_subword_prefix\":\"##\",\
            \"max_input_chars_per_word\":100,\
            \"vocab\":{}\
        }";

        assert_eq!(serde_json::to_string(&wp).unwrap(), wp_s);
        assert_eq!(serde_json::from_str::<WordPiece>(wp_s).unwrap(), wp);
    }

    #[test]
    fn deserialization_should_fail() {
        let missing_unk = "{\
            \"type\":\"WordPiece\",\
            \"continuing_subword_prefix\":\"##\",\
            \"max_input_chars_per_word\":100,\
            \"vocab\":{}\
        }";
        assert!(serde_json::from_str::<WordPiece>(missing_unk)
            .unwrap_err()
            .to_string()
            .starts_with("missing field `unk_token`"));

        let wrong_type = "{\
            \"type\":\"WordLevel\",\
            \"unk_token\":\"[UNK]\",\
            \"vocab\":{}\
        }";
        assert!(serde_json::from_str::<WordPiece>(wrong_type)
            .unwrap_err()
            .to_string()
            .starts_with("invalid value: string \"WordLevel\", expected WordPiece"));
    }
}
{ "file_path": "tokenizers/tokenizers/src/models/wordpiece/serialization.rs", "repo_id": "tokenizers", "token_count": 2453 }
277
pub mod bert;
pub mod byte_level;
pub mod delimiter;
pub mod digits;
pub mod metaspace;
pub mod punctuation;
pub mod sequence;
pub mod split;
pub mod unicode_scripts;
pub mod whitespace;

use serde::{Deserialize, Deserializer, Serialize};

use crate::pre_tokenizers::bert::BertPreTokenizer;
use crate::pre_tokenizers::byte_level::ByteLevel;
use crate::pre_tokenizers::delimiter::CharDelimiterSplit;
use crate::pre_tokenizers::digits::Digits;
use crate::pre_tokenizers::metaspace::Metaspace;
use crate::pre_tokenizers::punctuation::Punctuation;
use crate::pre_tokenizers::sequence::Sequence;
use crate::pre_tokenizers::split::Split;
use crate::pre_tokenizers::unicode_scripts::UnicodeScripts;
use crate::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use crate::{PreTokenizedString, PreTokenizer};

/// Sum type over every built-in pre-tokenizer, so any of them can be stored and
/// (de)serialized behind a single type.
/// Serialization is untagged here; the `type` tag is handled by the custom
/// `Deserialize` impl below.
#[derive(Serialize, Clone, Debug, PartialEq)]
#[serde(untagged)]
pub enum PreTokenizerWrapper {
    BertPreTokenizer(BertPreTokenizer),
    ByteLevel(ByteLevel),
    Delimiter(CharDelimiterSplit),
    Metaspace(Metaspace),
    Whitespace(Whitespace),
    Sequence(Sequence),
    Split(Split),
    Punctuation(Punctuation),
    WhitespaceSplit(WhitespaceSplit),
    Digits(Digits),
    UnicodeScripts(UnicodeScripts),
}

impl PreTokenizer for PreTokenizerWrapper {
    /// Dispatch `pre_tokenize` to the wrapped variant.
    fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> crate::Result<()> {
        match self {
            Self::BertPreTokenizer(bpt) => bpt.pre_tokenize(normalized),
            Self::ByteLevel(bpt) => bpt.pre_tokenize(normalized),
            Self::Delimiter(dpt) => dpt.pre_tokenize(normalized),
            Self::Metaspace(mspt) => mspt.pre_tokenize(normalized),
            Self::Whitespace(wspt) => wspt.pre_tokenize(normalized),
            Self::Punctuation(tok) => tok.pre_tokenize(normalized),
            Self::Sequence(tok) => tok.pre_tokenize(normalized),
            Self::Split(tok) => tok.pre_tokenize(normalized),
            Self::WhitespaceSplit(wspt) => wspt.pre_tokenize(normalized),
            Self::Digits(wspt) => wspt.pre_tokenize(normalized),
            Self::UnicodeScripts(us) => us.pre_tokenize(normalized),
        }
    }
}

impl<'de> Deserialize<'de> for PreTokenizerWrapper {
    /// Custom deserialization supporting two on-disk formats:
    /// - the current tagged form `{"type": "...", ...fields}`, and
    /// - a legacy untagged form, matched structurally against each variant.
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Tagged form: the `type` field plus all remaining fields captured raw.
        #[derive(Deserialize)]
        pub struct Tagged {
            #[serde(rename = "type")]
            variant: EnumType,
            #[serde(flatten)]
            rest: serde_json::Value,
        }
        #[derive(Deserialize, Serialize)]
        pub enum EnumType {
            BertPreTokenizer,
            ByteLevel,
            Delimiter,
            Metaspace,
            Whitespace,
            Sequence,
            Split,
            Punctuation,
            WhitespaceSplit,
            Digits,
            UnicodeScripts,
        }

        // First try the tagged form, otherwise keep the raw value for the
        // legacy structural match.
        #[derive(Deserialize)]
        #[serde(untagged)]
        pub enum PreTokenizerHelper {
            Tagged(Tagged),
            Legacy(serde_json::Value),
        }

        #[derive(Deserialize)]
        #[serde(untagged)]
        pub enum PreTokenizerUntagged {
            BertPreTokenizer(BertPreTokenizer),
            ByteLevel(ByteLevel),
            Delimiter(CharDelimiterSplit),
            Metaspace(Metaspace),
            Whitespace(Whitespace),
            Sequence(Sequence),
            Split(Split),
            Punctuation(Punctuation),
            WhitespaceSplit(WhitespaceSplit),
            Digits(Digits),
            UnicodeScripts(UnicodeScripts),
        }

        let helper = PreTokenizerHelper::deserialize(deserializer)?;
        Ok(match helper {
            PreTokenizerHelper::Tagged(pretok) => {
                // Re-insert the tag into the captured fields so each concrete
                // pre-tokenizer can be deserialized from the full object.
                let mut values: serde_json::Map<String, serde_json::Value> =
                    serde_json::from_value(pretok.rest).map_err(serde::de::Error::custom)?;
                values.insert(
                    "type".to_string(),
                    serde_json::to_value(&pretok.variant).map_err(serde::de::Error::custom)?,
                );
                let values = serde_json::Value::Object(values);
                match pretok.variant {
                    EnumType::BertPreTokenizer => PreTokenizerWrapper::BertPreTokenizer(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::ByteLevel => PreTokenizerWrapper::ByteLevel(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::Delimiter => PreTokenizerWrapper::Delimiter(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::Metaspace => PreTokenizerWrapper::Metaspace(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::Whitespace => PreTokenizerWrapper::Whitespace(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::Sequence => PreTokenizerWrapper::Sequence(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::Split => PreTokenizerWrapper::Split(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::Punctuation => PreTokenizerWrapper::Punctuation(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::WhitespaceSplit => PreTokenizerWrapper::WhitespaceSplit(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::Digits => PreTokenizerWrapper::Digits(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                    EnumType::UnicodeScripts => PreTokenizerWrapper::UnicodeScripts(
                        serde_json::from_value(values).map_err(serde::de::Error::custom)?,
                    ),
                }
            }
            PreTokenizerHelper::Legacy(value) => {
                // Legacy payload without a usable tag: try each variant's shape.
                let untagged = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
                match untagged {
                    PreTokenizerUntagged::BertPreTokenizer(bert) => {
                        PreTokenizerWrapper::BertPreTokenizer(bert)
                    }
                    PreTokenizerUntagged::ByteLevel(byte_level) => {
                        PreTokenizerWrapper::ByteLevel(byte_level)
                    }
                    PreTokenizerUntagged::Delimiter(delimiter) => {
                        PreTokenizerWrapper::Delimiter(delimiter)
                    }
                    PreTokenizerUntagged::Metaspace(metaspace) => {
                        PreTokenizerWrapper::Metaspace(metaspace)
                    }
                    PreTokenizerUntagged::Whitespace(whitespace) => {
                        PreTokenizerWrapper::Whitespace(whitespace)
                    }
                    PreTokenizerUntagged::Sequence(sequence) => {
                        PreTokenizerWrapper::Sequence(sequence)
                    }
                    PreTokenizerUntagged::Split(split) => PreTokenizerWrapper::Split(split),
                    PreTokenizerUntagged::Punctuation(punctuation) => {
                        PreTokenizerWrapper::Punctuation(punctuation)
                    }
                    PreTokenizerUntagged::WhitespaceSplit(whitespace_split) => {
                        PreTokenizerWrapper::WhitespaceSplit(whitespace_split)
                    }
                    PreTokenizerUntagged::Digits(digits) => PreTokenizerWrapper::Digits(digits),
                    PreTokenizerUntagged::UnicodeScripts(unicode_scripts) => {
                        PreTokenizerWrapper::UnicodeScripts(unicode_scripts)
                    }
                }
            }
        })
    }
}

impl_enum_from!(BertPreTokenizer, PreTokenizerWrapper, BertPreTokenizer);
impl_enum_from!(ByteLevel, PreTokenizerWrapper, ByteLevel);
impl_enum_from!(CharDelimiterSplit, PreTokenizerWrapper, Delimiter);
impl_enum_from!(Whitespace, PreTokenizerWrapper, Whitespace);
impl_enum_from!(Punctuation, PreTokenizerWrapper, Punctuation);
impl_enum_from!(Sequence, PreTokenizerWrapper, Sequence);
impl_enum_from!(Split, PreTokenizerWrapper, Split);
impl_enum_from!(Metaspace, PreTokenizerWrapper, Metaspace);
impl_enum_from!(WhitespaceSplit, PreTokenizerWrapper, WhitespaceSplit);
impl_enum_from!(Digits, PreTokenizerWrapper, Digits);
impl_enum_from!(UnicodeScripts, PreTokenizerWrapper, UnicodeScripts);

#[cfg(test)]
mod tests {
    use super::metaspace::PrependScheme;
    use super::*;

    #[test]
    fn test_deserialize() {
        // Old serialized form: extra `str_rep` field must be tolerated.
        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","str_rep":"▁","add_prefix_space":true}]}"#).unwrap();

        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Sequence(Sequence::new(vec![
                PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
                PreTokenizerWrapper::Metaspace(Metaspace::new('▁', PrependScheme::Always, true))
            ]))
        );

        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
            r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true}"#,
        )
        .unwrap();

        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Metaspace(Metaspace::new('▁', PrependScheme::Always, true))
        );

        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#).unwrap();

        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Sequence(Sequence::new(vec![
                PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
                PreTokenizerWrapper::Metaspace(Metaspace::new('▁', PrependScheme::Always, true))
            ]))
        );

        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
            r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"first"}"#,
        )
        .unwrap();

        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Metaspace(Metaspace::new(
                '▁',
                metaspace::PrependScheme::First,
                true
            ))
        );

        let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
            r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"always"}"#,
        )
        .unwrap();

        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::Metaspace(Metaspace::new(
                '▁',
                metaspace::PrependScheme::Always,
                true
            ))
        );
    }

    #[test]
    fn test_deserialize_whitespace_split() {
        let pre_tokenizer: PreTokenizerWrapper =
            serde_json::from_str(r#"{"type":"WhitespaceSplit"}"#).unwrap();
        assert_eq!(
            pre_tokenizer,
            PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {})
        );
    }

    #[test]
    fn pre_tokenizer_deserialization_no_type() {
        // Without a `type` tag, the legacy structural match is used — and an
        // ambiguous/invalid payload must fail.
        let json = r#"{"replacement":"▁","add_prefix_space":true, "prepend_scheme":"always"}}"#;
        let reconstructed = serde_json::from_str::<PreTokenizerWrapper>(json);
        match reconstructed {
            Err(err) => assert_eq!(
                err.to_string(),
                "data did not match any variant of untagged enum PreTokenizerUntagged"
            ),
            _ => panic!("Expected an error here"),
        }

        let json = r#"{"type":"Metaspace", "replacement":"▁" }"#;
        let reconstructed = serde_json::from_str::<PreTokenizerWrapper>(json).unwrap();
        assert_eq!(
            reconstructed,
            PreTokenizerWrapper::Metaspace(Metaspace::default())
        );

        let json = r#"{"type":"Metaspace", "add_prefix_space":true }"#;
        let reconstructed = serde_json::from_str::<PreTokenizerWrapper>(json);
        match reconstructed {
            Err(err) => assert_eq!(err.to_string(), "missing field `replacement`"),
            _ => panic!("Expected an error here"),
        }

        let json = r#"{"behavior":"default_split"}"#;
        let reconstructed = serde_json::from_str::<PreTokenizerWrapper>(json);
        match reconstructed {
            Err(err) => assert_eq!(
                err.to_string(),
                "data did not match any variant of untagged enum PreTokenizerUntagged"
            ),
            _ => panic!("Expected an error here"),
        }
    }
}
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/mod.rs", "repo_id": "tokenizers", "token_count": 6537 }
278
use crate::pattern::Pattern; use crate::{Offsets, Result}; use std::ops::{Bound, RangeBounds}; use unicode_normalization_alignments::UnicodeNormalization; use serde::{Deserialize, Serialize}; /// The possible offsets referential #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum OffsetReferential { Original, Normalized, } /// Represents a Range usable by the NormalizedString to index its content. /// A Range can use indices relative to either the `Original` or the `Normalized` string #[derive(Debug, Clone, PartialEq, Eq)] pub enum Range<T: RangeBounds<usize> + Clone> { Original(T), Normalized(T), } #[allow(clippy::len_without_is_empty)] impl<T> Range<T> where T: RangeBounds<usize> + Clone, { /// Unwrap the underlying range pub fn unwrap(self) -> T { match self { Self::Original(r) => r, Self::Normalized(r) => r, } } /// Return the length of the current Range if not Unbounded pub fn len(&self) -> Option<usize> { let range = self.clone().unwrap(); let end = match range.end_bound() { Bound::Unbounded => None, Bound::Included(i) => Some(*i + 1), Bound::Excluded(i) => Some(*i), }?; match range.start_bound() { Bound::Unbounded => Some(end), Bound::Included(i) => Some(end - (*i + 1)), Bound::Excluded(i) => Some(end - *i), } } /// Converts the current Range to a `std::ops::Range<usize>`. 
This requires the `max_len` /// of the represented string (in chars, not bytes) in order to cover the case where the /// original provided range was unbounded pub fn into_full_range(self, max_len: usize) -> std::ops::Range<usize> { let range = self.unwrap(); let start = match range.start_bound() { Bound::Unbounded => 0, Bound::Included(i) => *i, Bound::Excluded(i) => *i + 1, }; let end = match range.end_bound() { Bound::Unbounded => max_len, Bound::Included(i) => *i + 1, Bound::Excluded(i) => *i, }; start..end } } /// Defines the expected behavior for the delimiter of a Split Pattern /// When splitting on `'-'` for example, with input `the-final--countdown`: /// - Removed => `[ "the", "final", "countdown" ]` /// - Isolated => `[ "the", "-", "final", "-", "-", "countdown" ]` /// - MergedWithPrevious => `[ "the-", "final-", "-", "countdown" ]` /// - MergedWithNext => `[ "the", "-final", "-", "-countdown" ]` /// - Contiguous => `[ "the", "-", "final", "--", "countdown" ]` #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)] pub enum SplitDelimiterBehavior { Removed, Isolated, MergedWithPrevious, MergedWithNext, Contiguous, } /// A `NormalizedString` takes care of processing an "original" string to modify /// it and obtain a "normalized" string. It keeps both version of the string, /// alignments information between both and provides an interface to retrieve /// ranges of each string, using offsets from any of them. /// /// It is possible to retrieve a part of the original string, by indexing it with /// offsets from the normalized one, and the other way around too. It is also /// possible to convert offsets from one referential to the other one easily. 
#[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct NormalizedString { /// The original version of the string, before any modification original: String, /// The normalized version of the string, after all modifications normalized: String, /// Mapping from normalized string to original one: (start, end) for each /// byte of the normalized string alignments: Vec<(usize, usize)>, /// If this NormalizedString is a slice of a bigger one, we keep the track /// of the missing part, so that we can still give offsets from this original /// string. original_shift: usize, } impl NormalizedString { #[cfg(test)] pub(crate) fn new( original: String, normalized: String, alignments: Vec<(usize, usize)>, original_shift: usize, ) -> Self { Self { original, normalized, alignments, original_shift, } } /// Return the normalized string pub fn get(&self) -> &str { &self.normalized } /// Return the original string pub fn get_original(&self) -> &str { &self.original } /// Return the original offsets pub fn offsets_original(&self) -> Offsets { ( self.original_shift, self.original_shift + self.len_original(), ) } /// Convert the given offsets range from one referential to the other one: /// `Original => Normalized` or `Normalized => Original` /// /// Returns `None` when targeting something that is outside range pub fn convert_offsets<T>(&self, range: Range<T>) -> Option<std::ops::Range<usize>> where T: RangeBounds<usize> + Clone, { let len_original = self.len_original(); let len_normalized = self.len(); let (target, original) = match range { Range::Original(_) => (range.into_full_range(len_original), true), Range::Normalized(_) => (range.into_full_range(len_normalized), false), }; // If we target an empty range, let's return the same if target.start == target.end { return Some(target); } // If the target goes reverse, return None if target.start > target.end { return None; } // If we target 0..0 on an empty string, we want to expand to the entire equivalent if original && 
self.original.is_empty() && target == (0..0) { return Some(0..len_normalized); } if !original && self.normalized.is_empty() && target == (0..0) { return Some(0..len_original); } if original { let (mut start, mut end) = (None, None); self.alignments .iter() .enumerate() .take_while(|(_, alignment)| target.end >= alignment.1) .for_each(|(i, alignment)| { if start.is_none() && target.start <= alignment.0 { // For now, don't update if width == 0 if alignment.0 != alignment.1 { start = Some(i); } } if target.end >= alignment.1 { end = Some(i + 1); } }); match (start, end) { // Targetting inexistant beginning (Some(s), None) => Some(s..s), // Targetting inexistant end (None, Some(e)) => Some(e..e), // Found the range (Some(s), Some(e)) => Some(s..e), _ => None, } } else { self.alignments.get(target).and_then(expand_alignments) } } /// Return a range of the normalized string pub fn get_range<T>(&self, range: Range<T>) -> Option<&str> where T: RangeBounds<usize> + Clone, { match range { Range::Original(_) => self.normalized.get(self.convert_offsets(range)?), Range::Normalized(_) => self.normalized.get(range.into_full_range(self.len())), } } /// Return a range of the original string pub fn get_range_original<T>(&self, range: Range<T>) -> Option<&str> where T: RangeBounds<usize> + Clone, { match range { Range::Original(_) => self .original .get(range.into_full_range(self.len_original())), Range::Normalized(_) => self.original.get(self.convert_offsets(range)?), } } /// Validate the given range, to make sure it is on char boundaries fn validate_range<T: RangeBounds<usize> + Clone>( &self, range: Range<T>, ) -> Option<Range<std::ops::Range<usize>>> { match range { Range::Original(_) => { let r = range.into_full_range(self.original.len()); if !(self.original.is_char_boundary(r.start) && self.original.is_char_boundary(r.end)) { None } else { Some(Range::Original(r)) } } Range::Normalized(_) => { let r = range.into_full_range(self.normalized.len()); if 
!(self.normalized.is_char_boundary(r.start) && self.normalized.is_char_boundary(r.end)) { None } else { Some(Range::Normalized(r)) } } } } /// Return a slice of the current NormalizedString /// If the range is not on char boundaries, return None pub fn slice<T>(&self, range: Range<T>) -> Option<NormalizedString> where T: RangeBounds<usize> + Clone, { let full_range = self.validate_range(range)?; let (normalized_range, original_range) = match full_range { Range::Original(_) => ( self.convert_offsets(full_range.clone())?, full_range.clone().unwrap(), ), Range::Normalized(_) => ( full_range.clone().unwrap(), self.convert_offsets(full_range.clone())?, ), }; let n_shift = original_range.start; Some(Self { original: self .get_range_original(full_range.clone()) .unwrap_or_default() .into(), normalized: self.get_range(full_range).unwrap_or_default().into(), alignments: self .alignments .get(normalized_range)? .to_vec() .iter() .map(|(start, end)| (start - n_shift, end - n_shift)) .collect(), original_shift: self.original_shift + original_range.start, }) } /// Applies transformations to the current normalized version of the string, /// while updating the alignments. /// This method expect an Iterator yielding each char of the new normalized string /// with a `change` isize equals to: /// - `1` if this is a new char /// - `-N` if the char is right before N removed chars /// - `0` if the char is replacing the existing one /// /// Since it is possible that the normalized string doesn't include some of the characters at /// the beginning of the original one, we need an `initial_offset` which represents the number /// of removed chars at the very beginning. 
pub fn transform_range<T, I>(&mut self, range: Range<T>, dest: I, initial_offset: usize)
    where
        T: RangeBounds<usize> + Clone,
        I: IntoIterator<Item = (char, isize)>,
    {
        // Resolve the target to a normalized byte range; an original range
        // that cannot be converted makes this call a silent no-op.
        let n_range = match range {
            Range::Normalized(_) => range.into_full_range(self.len()),
            Range::Original(_) => match self.convert_offsets(range) {
                Some(range) => range,
                None => return,
            },
        };

        trace!(
            "===== transform_range call with {:?} (initial_offset: {}) =====",
            n_range,
            initial_offset
        );

        // Retrieve the original characters that are being replaced. This let us
        // compute the change in byte sizes along the way.
        let mut replaced_normalized = self.normalized[n_range.clone()]
            .chars()
            .collect::<Vec<_>>()
            .into_iter();
        let initial_removed: usize = (&mut replaced_normalized)
            .take(initial_offset)
            .map(|c| c.len_utf8())
            .sum();

        // `offset` tracks the byte position, in the current normalized string,
        // of the char being processed; alignments are looked up through it.
        let mut offset = (initial_removed + n_range.start) as isize;
        let mut alignments = Vec::with_capacity(n_range.len());
        trace!("=> Applying transformations");
        let normalized = dest
            .into_iter()
            .map(|(c, changes)| {
                trace!(
                    "### {:?} with size {}: {} with offset {} ###",
                    c,
                    c.len_utf8(),
                    match changes {
                        0 => "Replacing".into(),
                        ch if ch > 0 => "Adding".into(),
                        ch if ch < 0 => format!("Replacing + removing {} following chars", ch),
                        _ => "Undefined".into(),
                    },
                    offset
                );

                let idx = offset as usize;
                let align = if changes.is_positive() {
                    if idx < 1 {
                        (0, 0)
                    } else {
                        // This is a newly inserted character, so it shares the same alignment
                        // than the previous one
                        self.alignments[idx - 1]
                    }
                } else {
                    self.alignments[idx]
                };

                // If we are replacing a character, find it and compute the change in size
                let replaced_char = if !changes.is_positive() {
                    replaced_normalized.next()
                } else {
                    None
                };
                let replaced_char_size = replaced_char.map_or(0, |c| c.len_utf8());
                let replaced_char_size_change = c.len_utf8() as isize - replaced_char_size as isize;
                if let Some(ref replaced_char) = replaced_char {
                    trace!(
                        "Replacing char {:?} - with a change in size: {}",
                        replaced_char,
                        replaced_char_size_change
                    );
                }

                // If we are removing some characters, find them too
                let total_bytes_to_remove = if changes.is_negative() {
                    (&mut replaced_normalized)
                        .take(-changes as usize)
                        .map(|c| c.len_utf8())
                        .sum()
                } else {
                    0
                };
                trace!("Total bytes to remove: {}", total_bytes_to_remove);

                // Keep track of the changes for next offsets
                offset += replaced_char_size as isize;
                offset += total_bytes_to_remove as isize;
                trace!("New offset: {}", offset);

                trace!("New normalized alignment: {}x {:?}", c.len_utf8(), align);
                // One alignment entry per byte of the new char
                alignments.extend((0..c.len_utf8()).map(|_| align));

                // Then we keep only the char for string reconstruction
                c
            })
            .collect::<String>();

        // Splice both the per-byte alignments and the raw normalized bytes.
        self.alignments.splice(n_range.clone(), alignments);
        // SAFETY(review): `normalized` is valid UTF-8; this also relies on
        // `n_range` falling on char boundaries so the spliced String stays
        // well-formed — TODO confirm for arbitrary callers.
        unsafe {
            self.normalized
                .as_mut_vec()
                .splice(n_range, normalized.bytes());
        }
    }

    /// Applies transformations to the current normalized version of the string,
    /// while updating the alignments.
    /// This method expect an Iterator yielding each char of the new normalized string
    /// with a `change` isize equals to:
    /// - `1` if this is a new char
    /// - `-N` if the char is right before N removed chars
    /// - `0` if the char is replacing the existing one
    ///
    /// Since it is possible that the normalized string doesn't include some of the characters at
    /// the beginning of the original one, we need an `initial_offset` which represents the number
    /// of removed chars at the very beginning.
pub fn transform<I>(&mut self, dest: I, initial_offset: usize)
    where
        I: IntoIterator<Item = (char, isize)>,
    {
        // Whole-string variant of `transform_range`
        self.transform_range(Range::Original(..), dest, initial_offset)
    }

    /// Applies NFD normalization
    pub fn nfd(&mut self) -> &mut Self {
        self.transform(self.get().to_owned().nfd(), 0);
        self
    }

    /// Applies NFKD normalization
    pub fn nfkd(&mut self) -> &mut Self {
        self.transform(self.get().to_owned().nfkd(), 0);
        self
    }

    /// Applies NFC normalization
    pub fn nfc(&mut self) -> &mut Self {
        self.transform(self.get().to_owned().nfc(), 0);
        self
    }

    /// Applies NFKC normalization
    pub fn nfkc(&mut self) -> &mut Self {
        self.transform(self.get().to_owned().nfkc(), 0);
        self
    }

    /// Applies filtering over our characters
    pub fn filter<F: Fn(char) -> bool>(&mut self, keep: F) -> &mut Self {
        // Build a (char, change) sequence for `transform`: each kept char
        // carries `-removed`, the number of chars dropped right before it.
        let mut removed: isize = 0;
        // Chars dropped before the first kept one (becomes `initial_offset`)
        let mut removed_start: usize = 0;
        let mut transforms = Vec::with_capacity(self.normalized.len());
        let mut last_c = None;
        for c in self.normalized.chars() {
            if keep(c) {
                match last_c {
                    Some(lc) => {
                        transforms.push((lc, -removed));
                    }
                    None => {
                        removed_start = removed as usize;
                    }
                }
                last_c = Some(c);
                removed = 0;
            } else {
                removed += 1;
            }
        }
        // Flush the last kept char together with any trailing removals
        if let Some(lc) = last_c {
            transforms.push((lc, -removed));
        }
        self.transform(transforms, removed_start);
        self
    }

    /// Prepend the given string to ourself
    pub fn prepend(&mut self, s: &str) -> &mut Self {
        // No-op on an empty normalized string: we need an existing first
        // char to anchor the insertion.
        if let Some(next) = self.normalized.chars().next() {
            let transformations = s
                .chars()
                .enumerate()
                .map(|(i, c)| (c, isize::from(i != 0)))
                .chain(std::iter::once((next, 1)));
            self.transform_range(Range::Normalized(0..next.len_utf8()), transformations, 0);
        }
        self
    }

    /// Append the given string to ourself
    pub fn append(&mut self, s: &str) -> &mut Self {
        // No-op on an empty normalized string (symmetric to `prepend`)
        if let Some((b, prev)) = self.normalized.char_indices().last() {
            let transformations = std::iter::once((prev, 0)).chain(s.chars().map(|c| (c, 1)));
            self.transform_range(Range::Normalized(b..), transformations, 0);
        }
        self
    }

    /// Map our characters
    pub fn map<F: Fn(char) -> char>(&mut self, map: F) -> &mut Self {
        // 1:1 mapping: every char is a plain replacement (change == 0)
        let transformations = self
            .normalized
            .chars()
            .map(|c| (map(c), 0))
            .collect::<Vec<_>>();
        self.transform(transformations, 0);
        self
    }

    /// Calls the given function for each characters
    pub fn for_each<F: FnMut(char)>(&self, foreach: F) -> &Self {
        self.normalized.chars().for_each(foreach);
        self
    }

    /// Lowercase
    pub fn lowercase(&mut self) -> &mut Self {
        let mut new_chars: Vec<(char, isize)> = vec![];
        self.for_each(|c| {
            // `to_lowercase` may yield several chars; the extra ones are
            // marked as insertions (change == 1)
            c.to_lowercase().enumerate().for_each(|(index, c)| {
                new_chars.push((c, isize::from(index > 0)));
            })
        });
        self.transform(new_chars, 0);
        self
    }

    /// Uppercase
    pub fn uppercase(&mut self) -> &mut Self {
        let mut new_chars: Vec<(char, isize)> = vec![];
        self.for_each(|c| {
            // Same multi-char expansion handling as `lowercase`
            c.to_uppercase().enumerate().for_each(|(index, c)| {
                new_chars.push((c, isize::from(index > 0)));
            })
        });
        self.transform(new_chars, 0);
        self
    }

    /// Replace anything that matches the pattern with the given content.
    pub fn replace<P: Pattern>(&mut self, pattern: P, content: &str) -> Result<()> {
        let mut new_normalized = String::with_capacity(self.normalized.len()); // Initially allocate for the input size
        let mut new_alignments: Vec<(usize, usize)> = Vec::with_capacity(self.alignments.len());
        let mut last_end = 0; // Keep track of the last end position

        pattern
            .find_matches(&self.normalized)?
            .into_iter()
            .for_each(|((start, end), is_match)| {
                if is_match {
                    let range = start..end;

                    let mut new_len = 0;
                    let removed_chars = self.normalized[range.clone()].chars().count();

                    /* The following code is equivalent to this call, but computationally much more efficient
                    self.transform_range(
                        Range::Normalized(range),
                        content.chars().map(|c| {
                            new_len += c.len_utf8();
                            (c, 1)
                        }),
                        removed_chars,
                    );
                    */

                    // Copy the part of the string that is before the match
                    new_normalized.push_str(&self.normalized[last_end..start]);
                    new_alignments.extend(self.alignments[last_end..start].iter().cloned());

                    let n_range = Range::Normalized(range).into_full_range(self.len());

                    // Retrieve the original characters that are being replaced. This let us
                    // compute the change in byte sizes along the way.
                    let mut replaced_normalized = self.normalized[n_range.clone()]
                        .chars()
                        .collect::<Vec<_>>()
                        .into_iter();
                    let initial_removed: usize = (&mut replaced_normalized)
                        .take(removed_chars)
                        .map(|c| c.len_utf8())
                        .sum();

                    let dest = content.chars().map(|c| {
                        new_len += c.len_utf8();
                        (c, 1)
                    });

                    // Inlined core of `transform_range`, appending directly to
                    // `new_alignments` instead of splicing in place.
                    let mut offset = (initial_removed + n_range.start) as isize;
                    let normalized = dest
                        .into_iter()
                        .map(|(c, changes): (char, i32)| {
                            let idx = offset as usize;
                            let align = if changes.is_positive() {
                                if idx < 1 {
                                    (0, 0)
                                } else {
                                    // This is a newly inserted character, so it shares the same alignment
                                    // than the previous one
                                    self.alignments[idx - 1]
                                }
                            } else {
                                self.alignments[idx]
                            };

                            // If we are replacing a character, find it and compute the change in size
                            let replaced_char = if !changes.is_positive() {
                                replaced_normalized.next()
                            } else {
                                None
                            };
                            let replaced_char_size = replaced_char.map_or(0, |c| c.len_utf8());

                            // If we are removing some characters, find them too
                            let total_bytes_to_remove = if changes.is_negative() {
                                (&mut replaced_normalized)
                                    .take(-changes as usize)
                                    .map(|c| c.len_utf8())
                                    .sum()
                            } else {
                                0
                            };

                            // Keep track of the changes for next offsets
                            offset += replaced_char_size as isize;
                            offset += total_bytes_to_remove as isize;

                            new_alignments.extend((0..c.len_utf8()).map(|_| align));

                            // Then we keep only the char for string reconstruction
                            c
                        })
                        .collect::<String>();
                    new_normalized.push_str(&normalized);
                    last_end = end;
                }
            });

        // Copy the remaining part of the input
        new_normalized.push_str(&self.normalized[last_end..]);
        new_alignments.extend(&self.alignments[last_end..]);

        self.normalized = new_normalized;
        self.alignments = new_alignments;

        Ok(())
    }

    /// Clear the normalized part of the string
    ///
    /// Returns the byte length the normalized string had before clearing.
    /// NOTE(review): that byte length is passed to `transform` as an
    /// `initial_offset`, which elsewhere counts chars — confirm for
    /// multi-byte content.
    pub fn clear(&mut self) -> usize {
        let len = self.len();
        self.transform(std::iter::empty(), len);
        len
    }

    /// Split the current string in many subparts. Specify what to do with the
    /// delimiter.
    ///
    /// ## Splitting Behavior for the delimiter
    ///
    /// The behavior can be one of the followings:
    /// When splitting on `'-'` for example, with input `the-final--countdown`:
    /// - Removed => `[ "the", "", "final", "", "", "countdown" ]`
    /// - Isolated => `[ "the", "-", "final", "-", "-", "countdown" ]`
    /// - MergedWithPrevious => `[ "the-", "final-", "-", "countdown" ]`
    /// - MergedWithNext => `[ "the", "-final", "-", "-countdown" ]`
    pub fn split<P: Pattern>(
        &self,
        pattern: P,
        behavior: SplitDelimiterBehavior,
    ) -> Result<Vec<NormalizedString>> {
        let matches = pattern.find_matches(&self.normalized)?;

        // Process the matches according to the selected behavior: Vec<(Offsets, should_remove)>
        use SplitDelimiterBehavior::*;
        let splits = match behavior {
            Isolated => matches
                .into_iter()
                .map(|(offsets, _)| (offsets, false))
                .collect(),
            Removed => matches,
            Contiguous => {
                // Merge runs of consecutive same-kind (match / non-match) spans
                let mut previous_match = false;
                matches
                    .into_iter()
                    .fold(vec![], |mut acc, (offsets, is_match)| {
                        if is_match == previous_match {
                            // Continuing the previous run: extend its end
                            if let Some(((_, end), _)) = acc.last_mut() {
                                *end = offsets.1;
                            } else {
                                acc.push((offsets, false));
                            }
                        } else {
                            acc.push((offsets, false));
                        }
                        previous_match = is_match;
                        acc
                    })
            }
            MergedWithPrevious => {
                let mut previous_match = false;
                matches
                    .into_iter()
                    .fold(vec![], |mut acc, (offsets, is_match)| {
                        // A delimiter following a non-delimiter is glued onto it
                        if is_match && !previous_match {
                            if let Some(((_, end), _)) = acc.last_mut() {
                                *end = offsets.1;
                            } else {
                                acc.push((offsets, false));
                            }
                        } else {
                            acc.push((offsets, false));
                        }
                        previous_match = is_match;
                        acc
                    })
            }
            MergedWithNext => {
                // Same as MergedWithPrevious, but scanning from the right
                let mut previous_match = false;
                let mut matches = matches
                    .into_iter()
                    .rev()
                    .fold(vec![], |mut acc, (offsets, is_match)| {
                        if is_match && !previous_match {
                            if let Some(((start, _), _)) = acc.last_mut() {
                                *start = offsets.0;
                            } else {
                                acc.push((offsets, false));
                            }
                        } else {
                            acc.push((offsets, false));
                        }
                        previous_match = is_match;
                        acc
                    });
                matches.reverse();
                matches
            }
        };

        // Then we split according to the computed splits
        Ok(splits
            .into_iter()
            .filter_map(|(offsets, remove)| {
                if !remove {
                    Some(
                        self.slice(Range::Normalized(offsets.0..offsets.1))
                            .expect("NormalizedString bad split"),
                    )
                } else {
                    None
                }
            })
            .collect())
    }

    /// Remove any leading space(s) of the normalized string
    pub fn lstrip(&mut self) -> &mut Self {
        self.lrstrip(true, false)
    }

    /// Remove any trailing space(s) of the normalized string
    pub fn rstrip(&mut self) -> &mut Self {
        self.lrstrip(false, true)
    }

    /// Remove any leading and trailing space(s) of the normalized string
    pub fn strip(&mut self) -> &mut Self {
        self.lrstrip(true, true)
    }

    /// Shared implementation of the strip family
    fn lrstrip(&mut self, left: bool, right: bool) -> &mut Self {
        let leading_spaces = if left {
            self.get().chars().take_while(|c| c.is_whitespace()).count()
        } else {
            0
        };
        let trailing_spaces = if right {
            self.get()
                .chars()
                .rev()
                .take_while(|c| c.is_whitespace())
                .count()
        } else {
            0
        };

        if leading_spaces > 0 || trailing_spaces > 0 {
            let count = self.get().chars().count();
            let transformation = self
                .normalized
                .chars()
                .enumerate()
                .filter_map(|(i, c)| {
                    if i < leading_spaces || i >= count - trailing_spaces {
                        None
                    } else if i == self.len() - trailing_spaces - 1 {
                        // The last kept char absorbs the trailing removals.
                        // NOTE(review): `self.len()` is a byte count while `i`
                        // is a char index — for multi-byte input this branch
                        // may never fire; verify alignments for non-ASCII rstrip.
                        Some((c, -(trailing_spaces as isize)))
                    } else {
                        Some((c, 0))
                    }
                })
                .collect::<Vec<_>>();
            self.transform(transformation, leading_spaces);
        }
        self
    }

    /// Returns the length of the
normalized string, in bytes
    pub fn len(&self) -> usize {
        self.normalized.len()
    }

    /// Returns the length of the original string, in bytes
    pub fn len_original(&self) -> usize {
        self.original.len()
    }

    /// Whether the normalized string is empty
    pub fn is_empty(&self) -> bool {
        self.normalized.is_empty()
    }

    /// Recalculate original alignments
    ///
    /// Produces, for every byte of the original string, the range of
    /// normalized bytes it maps to (the inverse of `alignments`).
    #[allow(dead_code)]
    pub(crate) fn alignments_original(&self) -> Vec<(usize, usize)> {
        // Start, end are in alignments
        // offset, length are in alignments_original
        let mut alignments_original = Vec::with_capacity(self.original.len());

        // Eventual gap before first group
        let start = self.alignments[0].0;
        if start != 0 {
            alignments_original.extend(vec![(0, 0); start]);
        }

        let mut last = (&self.alignments[0].0, &self.alignments[0].1);
        let mut offset = 0;
        let mut length = 0;
        for (start, end) in &self.alignments {
            if last == (start, end) {
                // This is the same group
                length += 1;
            } else {
                // This is a new group
                if start < last.1 {
                    panic!("We can't have overlapping ranges.");
                }
                // Add the old group
                alignments_original.extend(vec![(offset, offset + length); last.1 - last.0]);
                offset += length;
                length = 1;

                // Eventual gap between the 2 groups
                alignments_original.extend(vec![(offset, offset); start - last.1]);
            }
            last = (start, end);
        }
        // Add the last group
        alignments_original.extend(vec![(offset, offset + length); last.1 - last.0]);

        // Add eventual last gap
        offset += length;
        alignments_original.extend(vec![
            (offset, offset);
            self.original.len() - alignments_original.len()
        ]);

        // assert_eq!(alignments_original.len(), self.original.len());
        alignments_original
    }
}

/// Returns the range covered by a slice of alignments
fn expand_alignments(alignments: &[(usize, usize)]) -> Option<std::ops::Range<usize>> {
    if alignments.is_empty() {
        None
    } else {
        // From the first span's start to the last span's end
        let start = alignments[0].0;
        let end = alignments[alignments.len() - 1].1;
        Some(start..end)
    }
}

/// Returns a range of the given string slice, by indexing chars instead of bytes
pub fn get_range_of<T: RangeBounds<usize>>(s: &str, range: T) -> Option<&str> {
    let len = s.chars().count();
    // Resolve the char-index bounds
    let start = match range.start_bound() {
        Bound::Unbounded => 0,
        Bound::Included(i) => *i,
        Bound::Excluded(i) => *i + 1,
    };
    let end = match range.end_bound() {
        Bound::Unbounded => len,
        Bound::Included(i) => *i + 1,
        Bound::Excluded(i) => *i,
    };

    if start == 0 && end == 0 {
        Some(&s[0..0])
    } else if start >= len || end > len || start >= end {
        None
    } else {
        // Translate char indices to byte offsets
        let start_b = s.char_indices().map(|(i, _)| i).nth(start).unwrap_or(0);
        let end_b = s.char_indices().map(|(i, _)| i).nth(end).unwrap_or(s.len());
        Some(&s[start_b..end_b])
    }
}

/// Convert the given range from bytes to char
pub fn bytes_to_char(s: &str, range: std::ops::Range<usize>) -> Option<std::ops::Range<usize>> {
    // The empty 0..0 range always maps to itself
    let (mut start, mut end) = if range == (0..0) {
        (Some(0), Some(0))
    } else {
        (None, None)
    };

    s.char_indices()
        .enumerate()
        .take_while(|(_, (b, _))| *b <= range.end)
        .filter(|(_, (b, _))| *b >= range.start)
        .for_each(|(i, (b, c))| {
            if b == range.start {
                start = Some(i);
            }
            if b == range.end {
                end = Some(i);
            }
            // The end may fall just past the last char of the string
            if b + c.len_utf8() == range.end {
                end = Some(i + 1);
            }
        });

    Some(start?..end?)
}

/// Convert the given range from char to bytes
pub fn char_to_bytes(s: &str, range: std::ops::Range<usize>) -> Option<std::ops::Range<usize>> {
    // The empty 0..0 range always maps to itself
    let (mut start, mut end) = if range == (0..0) {
        (Some(0), Some(0))
    } else {
        (None, None)
    };

    if range.start == range.end {
        // Empty range: anchor both bounds on the char at `range.start`
        s.char_indices()
            .skip(range.start)
            .take(1)
            .for_each(|(b, _)| {
                start = Some(b);
                end = Some(b);
            });
    } else {
        s.char_indices()
            .skip(range.start)
            .take(range.end - range.start)
            .for_each(|(b, c)| {
                if start.is_none() {
                    start = Some(b);
                }
                end = Some(b + c.len_utf8());
            });
    }

    Some(start?..end?)
} impl From<String> for NormalizedString { fn from(s: String) -> Self { let alignments = s .char_indices() .flat_map(|(b, c)| { let len = c.len_utf8(); (0..len).map(move |_| (b, b + len)) }) .collect::<Vec<_>>(); Self { original: s.clone(), normalized: s, alignments, original_shift: 0, } } } impl From<&str> for NormalizedString { fn from(s: &str) -> Self { Self::from(s.to_owned()) } } #[cfg(test)] mod tests { use super::*; use regex::Regex; use unicode_categories::UnicodeCategories; #[test] fn nfd_adds_new_chars() { let mut n = NormalizedString::from("élégant"); n.nfd(); assert_eq!( &n.alignments, &[ (0, 2), (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9) ] ); assert_eq!( n.alignments_original(), vec![ (0, 3), (0, 3), (3, 4), (4, 7), (4, 7), (7, 8), (8, 9), (9, 10), (10, 11) ] ); } #[test] fn remove_chars_added_by_nfd() { let mut n = NormalizedString::from("élégant"); n.nfd().filter(|c| !c.is_mark_nonspacing()); assert_eq!(n.get(), "elegant"); assert_eq!( &n.alignments, &[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9)] ); assert_eq!( n.alignments_original(), vec![ (0, 1), (0, 1), (1, 2), (2, 3), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7) ] ); } #[test] fn remove_chars() { let mut n = NormalizedString::from("élégant"); n.filter(|c| c != 'n'); assert_eq!(n.get(), "élégat"); assert_eq!( &n.alignments, &[ (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (5, 6), (6, 7), // Skipped range (8, 9) ] ); assert_eq!( n.alignments_original(), vec![ (0, 2), (0, 2), (2, 3), (3, 5), (3, 5), (5, 6), (6, 7), (7, 7), // Eaten n (7, 8) ] ); } #[test] fn mixed_addition_and_removal() { let mut n = NormalizedString::from("élégant"); n.nfd().filter(|c| !c.is_mark_nonspacing() && c != 'n'); assert_eq!(n.get(), "elegat"); assert_eq!( &n.alignments, &[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (8, 9)] ); assert_eq!( n.alignments_original(), vec![ (0, 1), (0, 1), (1, 2), (2, 3), (2, 3), (3, 4), // g (4, 5), // a (5, 5), // Eaten n (5, 6) ] ); } #[test] fn 
range_conversion() { let mut n = NormalizedString::from(" __Hello__ "); n.filter(|c| !c.is_whitespace()).lowercase(); let hello_n = n.convert_offsets(Range::Original(6..11)); assert_eq!(hello_n, Some(2..7)); assert_eq!( n.get_range(Range::Normalized(hello_n.clone().unwrap())), Some("hello") ); assert_eq!( n.get_range_original(Range::Normalized(hello_n.unwrap())), Some("Hello") ); assert_eq!(n.get_range(Range::Original(6..11)), Some("hello")); assert_eq!(n.get_range_original(Range::Original(6..11)), Some("Hello")); // Make sure we get None only in specific cases assert_eq!(n.convert_offsets(Range::Original(0..0)), Some(0..0)); assert_eq!(n.convert_offsets(Range::Original(3..3)), Some(3..3)); assert_eq!(n.convert_offsets(Range::Original(15..)), Some(9..9)); assert_eq!(n.convert_offsets(Range::Original(16..)), Some(16..16)); assert_eq!(n.convert_offsets(Range::Original(17..)), None); assert_eq!(n.convert_offsets(Range::Normalized(0..0)), Some(0..0)); assert_eq!(n.convert_offsets(Range::Normalized(3..3)), Some(3..3)); assert_eq!(n.convert_offsets(Range::Normalized(9..)), Some(9..9)); assert_eq!(n.convert_offsets(Range::Normalized(10..)), None); } #[test] fn original_range() { let mut n = NormalizedString::from("Hello_______ World!"); n.filter(|c| c != '_').lowercase(); let world_n = n.get_range(Range::Normalized(6..11)).unwrap(); let world_o = n.get_range_original(Range::Normalized(6..11)).unwrap(); assert_eq!(world_n, "world"); assert_eq!(world_o, "World"); let original_range = Range::Original(n.convert_offsets(Range::Normalized(6..11)).unwrap()); assert_eq!(n.get_range(original_range.clone()).unwrap(), "world"); assert_eq!( n.get_range_original(original_range.clone()).unwrap(), "World" ); assert_eq!(original_range.into_full_range(n.len_original()), 13..18); } #[test] fn added_around_edges() { let mut n = NormalizedString::from("Hello"); n.transform( vec![ (' ', 1), ('H', 0), ('e', 0), ('l', 0), ('l', 0), ('o', 0), (' ', 1), ], 0, ); assert_eq!(&n.normalized, " Hello 
"); assert_eq!( n.get_range_original(Range::Normalized(1..n.normalized.len() - 1)), Some("Hello") ); } #[test] fn added_characters_alignment() { let mut n = NormalizedString::from("野口 No"); n.transform( n.get().to_owned().chars().flat_map(|c| { if (c as usize) > 0x4E00 { vec![(' ', 0), (c, 1), (' ', 1)] } else { vec![(c, 0)] } }), 0, ); assert_eq!( n, NormalizedString { original: "野口 No".into(), normalized: " 野 口 No".into(), alignments: vec![ (0, 3), (0, 3), (0, 3), (0, 3), (0, 3), (3, 6), (3, 6), (3, 6), (3, 6), (3, 6), (6, 7), (7, 8), (8, 9) ], original_shift: 0 } ); assert_eq!( n.alignments_original(), vec![ (0, 5), (0, 5), (0, 5), (5, 10), (5, 10), (5, 10), (10, 11), (11, 12), (12, 13) ] ); } #[test] fn remove_at_beginning() { let mut n = NormalizedString::from(" Hello"); n.filter(|c| !c.is_whitespace()); assert_eq!( n.get_range_original(Range::Normalized(1.."Hello".len())), Some("ello") ); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("Hello") ); } #[test] fn remove_at_end() { let mut n = NormalizedString::from("Hello "); n.filter(|c| !c.is_whitespace()); assert_eq!(n.get_range_original(Range::Normalized(0..4)), Some("Hell")); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("Hello") ); } #[test] fn removed_around_both_edges() { let mut n = NormalizedString::from(" Hello "); n.filter(|c| !c.is_whitespace()); assert_eq!(&n.normalized, "Hello"); assert_eq!( n.get_range_original(Range::Normalized(0.."Hello".len())), Some("Hello") ); assert_eq!( n.get_range_original(Range::Normalized(1.."Hell".len())), Some("ell") ); } #[test] fn lstrip() { let mut n = NormalizedString::from(" This is an example "); n.lstrip(); assert_eq!(&n.normalized, "This is an example "); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("This is an example ") ); } #[test] fn rstrip() { let mut n = NormalizedString::from(" This is an example "); n.rstrip(); assert_eq!(&n.normalized, " This is an 
example"); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some(" This is an example") ); } #[test] fn strip() { let mut n = NormalizedString::from(" This is an example "); n.strip(); assert_eq!(&n.normalized, "This is an example"); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("This is an example") ); } #[test] fn strip_unicode() { let mut n = NormalizedString::from(" 你好asa \n"); n.strip(); assert_eq!(&n.normalized, "你好asa"); assert_eq!( n.get_range_original(Range::Normalized(0..n.normalized.len())), Some("你好asa") ); } #[test] fn prepend() { let mut n = NormalizedString::from("there"); n.prepend("Hey "); assert_eq!(&n.normalized, "Hey there"); assert_eq!( n.alignments, vec![ (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5) ] ); assert_eq!(n.convert_offsets(Range::Normalized(0..4)), Some(0..1)); } #[test] fn append() { let mut n = NormalizedString::from("Hey"); n.append(" there"); assert_eq!(&n.normalized, "Hey there"); assert_eq!( n.alignments, vec![ (0, 1), (1, 2), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3) ] ); assert_eq!( n.convert_offsets(Range::Normalized(3.." there".len())), Some(2..3) ); } #[test] fn get_range() { let s = String::from("Hello my name is John 👋"); assert_eq!(get_range_of(&s, ..), Some(&s[..])); assert_eq!(get_range_of(&s, 17..), Some("John 👋")); } #[test] fn slice() { let mut s = NormalizedString::from("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘"); s.nfkc(); let original_slice = s.slice(Range::Original(0..4)).unwrap(); assert_eq!(original_slice.get(), "G"); assert_eq!(original_slice.get_original(), "𝔾"); let normalized_slice = s.slice(Range::Normalized(0..4)).unwrap(); assert_eq!(normalized_slice.get(), "Good"); assert_eq!(normalized_slice.get_original(), "𝔾𝕠𝕠𝕕"); // Make sure the sliced NormalizedString is still aligned as expected let mut s = NormalizedString::from(" Good Morning! 
"); s.strip(); // If we keep the whole slice let slice = s.slice(Range::Original(..)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); let slice = s.slice(Range::Normalized(..)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); // If we keep after the modified piece let slice = s.slice(Range::Original(4..15)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..3)), Some("ood") ); // If we keep only the modified piece let slice = s.slice(Range::Original(3..16)).unwrap(); assert_eq!( slice.get_range_original(Range::Normalized(0..4)), Some("Good") ); } #[test] fn replace() { // Simple let mut s = NormalizedString::from(" Hello friend "); s.replace(' ', "_").unwrap(); assert_eq!(s.get(), "_Hello___friend_"); let mut s = NormalizedString::from("aaaab"); s.replace('a', "b").unwrap(); assert_eq!(s.get(), "bbbbb"); // Overlapping let mut s = NormalizedString::from("aaaab"); s.replace("aaa", "b").unwrap(); assert_eq!(s.get(), "bab"); // Regex let mut s = NormalizedString::from(" Hello friend "); let re = Regex::new(r"\s+").unwrap(); s.replace(&re, "_").unwrap(); assert_eq!(s.get(), "_Hello_friend_"); } #[test] fn split() { use SplitDelimiterBehavior::*; let s = NormalizedString::from("The-final--countdown"); let test = |behavior: SplitDelimiterBehavior, result: Vec<&str>| { let splits = s.split('-', behavior).unwrap(); assert_eq!(splits.iter().map(|n| n.get()).collect::<Vec<_>>(), result); }; test(Removed, vec!["The", "final", "countdown"]); test(Isolated, vec!["The", "-", "final", "-", "-", "countdown"]); test(MergedWithPrevious, vec!["The-", "final-", "-", "countdown"]); test(MergedWithNext, vec!["The", "-final", "-", "-countdown"]); test(Contiguous, vec!["The", "-", "final", "--", "countdown"]); } #[test] fn transform_range_single_bytes() { let s = NormalizedString::from("Hello friend"); // Removing at the beginning let mut current = s.clone(); 
current.transform_range(Range::Original(0..4), vec![('Y', 0)], 3); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Yo friend".into(), alignments: vec![ (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 0), (0, 0), (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9) ] ); // Removing in the middle let mut current = s.clone(); current.transform_range( Range::Original(3..10), vec![('_', 0), ('F', 0), ('R', -2)], 2, ); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hel_FRnd".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (5, 6), (6, 7), (7, 8), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 3), (3, 3), (3, 4), (4, 5), (5, 6), (6, 6), (6, 6), (6, 7), (7, 8) ] ); // Removing at the end let mut current = s.clone(); current.transform_range(Range::Original(5..), vec![('_', 0), ('F', -5)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello_F".into(), alignments: vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 7), (7, 7), (7, 7), (7, 7), (7, 7) ] ); // Adding at the beginning let mut current = s.clone(); current.transform_range(Range::Original(0..1), vec![('H', 1), ('H', 0)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Equivalent 
to the previous one let mut current = s.clone(); current.transform_range(Range::Original(0..0), vec![('H', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Adding as part of the first character let mut current = s.clone(); current.transform_range(Range::Original(0..1), vec![('H', 0), ('H', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "HHello friend".into(), alignments: vec![ (0, 1), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13) ] ); // Adding in the middle let mut current = s.clone(); current.transform_range( Range::Original(5..6), vec![('_', 0), ('m', 1), ('y', 1), ('_', 1)], 0, ); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello_my_friend".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (5, 6), (5, 6), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12) ], original_shift: 0, } ); assert_eq!( current.alignments_original(), vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15) ] ); // Adding at the end let mut current = s; current.transform_range(Range::Original(11..), vec![('d', 0), ('_', 1), ('!', 1)], 0); assert_eq!( current, NormalizedString { original: "Hello friend".into(), normalized: "Hello friend_!".into(), alignments: vec![ (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), 
// NOTE(review): this chunk opens inside the expected-value literal of the
// preceding test (its opening lines are outside this chunk); the tokens below
// are its tail, reproduced unchanged.
(6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (11, 12), (11, 12) ],
        original_shift: 0,
    }
);
assert_eq!(
    current.alignments_original(),
    vec![
        (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6),
        (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 14)
    ]
);
}

/// `transform_range` over a string made entirely of 4-byte UTF-8 characters:
/// removals and insertions at the beginning, middle and end must keep the
/// byte-level alignments consistent in both directions (normalized->original
/// via `alignments`, original->normalized via `alignments_original`).
#[test]
fn transform_range_multiple_bytes() {
    let s = NormalizedString::from("𝔾𝕠𝕠𝕕");

    // Removing at the beginning
    let mut current = s.clone();
    current.transform_range(Range::Original(0..8), vec![('G', -1)], 0);
    assert_eq!(
        current,
        NormalizedString {
            original: "𝔾𝕠𝕠𝕕".into(),
            normalized: "G𝕠𝕕".into(),
            alignments: vec![
                (0, 4), (8, 12), (8, 12), (8, 12), (8, 12),
                (12, 16), (12, 16), (12, 16), (12, 16)
            ],
            original_shift: 0,
        }
    );
    assert_eq!(
        current.alignments_original(),
        vec![
            (0, 1), (0, 1), (0, 1), (0, 1),
            (1, 1), (1, 1), (1, 1), (1, 1),
            (1, 5), (1, 5), (1, 5), (1, 5),
            (5, 9), (5, 9), (5, 9), (5, 9)
        ]
    );
    assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "G");
    assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "G");
    assert_eq!(
        current.get_range_original(Range::Original(0..4)).unwrap(),
        "𝔾"
    );
    assert_eq!(
        current.get_range_original(Range::Original(0..8)).unwrap(),
        "𝔾𝕠"
    );

    // Removing in the middle
    let mut current = s.clone();
    current.transform_range(Range::Original(4..12), vec![('o', -1)], 0);
    assert_eq!(
        current,
        NormalizedString {
            original: "𝔾𝕠𝕠𝕕".into(),
            normalized: "𝔾o𝕕".into(),
            alignments: vec![
                (0, 4), (0, 4), (0, 4), (0, 4), (4, 8),
                (12, 16), (12, 16), (12, 16), (12, 16)
            ],
            original_shift: 0,
        }
    );
    assert_eq!(
        current.alignments_original(),
        vec![
            (0, 4), (0, 4), (0, 4), (0, 4),
            (4, 5), (4, 5), (4, 5), (4, 5),
            (5, 5), (5, 5), (5, 5), (5, 5),
            (5, 9), (5, 9), (5, 9), (5, 9)
        ]
    );

    // Removing at the end
    let mut current = s.clone();
    current.transform_range(Range::Original(12..), vec![('d', 0), ('!', 1)], 0);
    assert_eq!(
        current,
        NormalizedString {
            original: "𝔾𝕠𝕠𝕕".into(),
            normalized: "𝔾𝕠𝕠d!".into(),
            alignments: vec![
                (0, 4), (0, 4), (0, 4), (0, 4),
                (4, 8), (4, 8), (4, 8), (4, 8),
                (8, 12), (8, 12), (8, 12), (8, 12),
                (12, 16), (12, 16)
            ],
            original_shift: 0,
        }
    );

    // Adding at the beginning
    let mut current = s.clone();
    current.transform_range(Range::Original(0..4), vec![('_', 1), ('𝔾', 0)], 0);
    assert_eq!(
        current,
        NormalizedString {
            original: "𝔾𝕠𝕠𝕕".into(),
            normalized: "_𝔾𝕠𝕠𝕕".into(),
            alignments: vec![
                (0, 0),
                (0, 4), (0, 4), (0, 4), (0, 4),
                (4, 8), (4, 8), (4, 8), (4, 8),
                (8, 12), (8, 12), (8, 12), (8, 12),
                (12, 16), (12, 16), (12, 16), (12, 16)
            ],
            original_shift: 0,
        }
    );
    assert_eq!(
        current.alignments_original(),
        vec![
            (1, 5), (1, 5), (1, 5), (1, 5),
            (5, 9), (5, 9), (5, 9), (5, 9),
            (9, 13), (9, 13), (9, 13), (9, 13),
            (13, 17), (13, 17), (13, 17), (13, 17)
        ]
    );
    assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾𝕠");
    assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾");
    assert_eq!(
        current.get_range_original(Range::Original(0..4)).unwrap(),
        "𝔾"
    );
    assert_eq!(
        current.get_range_original(Range::Original(0..8)).unwrap(),
        "𝔾𝕠"
    );

    // Equivalent to the previous one
    let mut current = s.clone();
    current.transform_range(Range::Original(0..0), vec![('_', 1)], 0);
    assert_eq!(
        current,
        NormalizedString {
            original: "𝔾𝕠𝕠𝕕".into(),
            normalized: "_𝔾𝕠𝕠𝕕".into(),
            alignments: vec![
                (0, 0),
                (0, 4), (0, 4), (0, 4), (0, 4),
                (4, 8), (4, 8), (4, 8), (4, 8),
                (8, 12), (8, 12), (8, 12), (8, 12),
                (12, 16), (12, 16), (12, 16), (12, 16)
            ],
            original_shift: 0,
        }
    );
    assert_eq!(
        current.alignments_original(),
        vec![
            (1, 5), (1, 5), (1, 5), (1, 5),
            (5, 9), (5, 9), (5, 9), (5, 9),
            (9, 13), (9, 13), (9, 13), (9, 13),
            (13, 17), (13, 17), (13, 17), (13, 17)
        ]
    );
    assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾𝕠");
    assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾");
    assert_eq!(
        current.get_range_original(Range::Original(0..4)).unwrap(),
        "𝔾"
    );
    assert_eq!(
        current.get_range_original(Range::Original(0..8)).unwrap(),
        "𝔾𝕠"
    );

    // Adding as part of the first character
    let mut current = s.clone();
    current.transform_range(Range::Original(0..4), vec![('𝔾', 0), ('o', 1)], 0);
    assert_eq!(
        current,
        NormalizedString {
            original: "𝔾𝕠𝕠𝕕".into(),
            normalized: "𝔾o𝕠𝕠𝕕".into(),
            alignments: vec![
                (0, 4), (0, 4), (0, 4), (0, 4), (0, 4),
                (4, 8), (4, 8), (4, 8), (4, 8),
                (8, 12), (8, 12), (8, 12), (8, 12),
                (12, 16), (12, 16), (12, 16), (12, 16)
            ],
            original_shift: 0,
        }
    );
    assert_eq!(
        current.alignments_original(),
        vec![
            (0, 5), (0, 5), (0, 5), (0, 5),
            (5, 9), (5, 9), (5, 9), (5, 9),
            (9, 13), (9, 13), (9, 13), (9, 13),
            (13, 17), (13, 17), (13, 17), (13, 17)
        ]
    );
    assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾o𝕠");
    assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾o");
    assert_eq!(
        current.get_range_original(Range::Original(0..4)).unwrap(),
        "𝔾"
    );
    assert_eq!(
        current.get_range_original(Range::Original(0..8)).unwrap(),
        "𝔾𝕠"
    );

    // Adding in the middle
    let mut current = s.clone();
    current.transform_range(
        Range::Original(4..8),
        vec![('𝕠', 0), ('o', 1), ('o', 1), ('o', 1)],
        0,
    );
    assert_eq!(
        current,
        NormalizedString {
            original: "𝔾𝕠𝕠𝕕".into(),
            normalized: "𝔾𝕠ooo𝕠𝕕".into(),
            alignments: vec![
                (0, 4), (0, 4), (0, 4), (0, 4),
                (4, 8), (4, 8), (4, 8), (4, 8), (4, 8), (4, 8), (4, 8),
                (8, 12), (8, 12), (8, 12), (8, 12),
                (12, 16), (12, 16), (12, 16), (12, 16)
            ],
            original_shift: 0,
        }
    );
    assert_eq!(
        current.alignments_original(),
        vec![
            (0, 4), (0, 4), (0, 4), (0, 4),
            (4, 11), (4, 11), (4, 11), (4, 11),
            (11, 15), (11, 15), (11, 15), (11, 15),
            (15, 19), (15, 19), (15, 19), (15, 19)
        ]
    );

    // Adding at the end
    let mut current = s;
    current.transform_range(Range::Original(16..), vec![('!', 1)], 0);
    assert_eq!(
        current,
        NormalizedString {
            original: "𝔾𝕠𝕠𝕕".into(),
            normalized: "𝔾𝕠𝕠𝕕!".into(),
            alignments: vec![
                (0, 4), (0, 4), (0, 4), (0, 4),
                (4, 8), (4, 8), (4, 8), (4, 8),
                (8, 12), (8, 12), (8, 12), (8, 12),
                (12, 16), (12, 16), (12, 16), (12, 16), (12, 16)
            ],
            original_shift: 0,
        }
    );
    assert_eq!(
        current.alignments_original(),
        vec![
            (0, 4), (0, 4), (0, 4), (0, 4),
            (4, 8), (4, 8), (4, 8), (4, 8),
            (8, 12), (8, 12), (8, 12), (8, 12),
            (12, 17), (12, 17), (12, 17), (12, 17)
        ]
    );
}

/// `transform` applied after NFKD normalization; expects "abc…" to end up as
/// "a..." once the transform and `lowercase` have run.
#[test]
fn transform_check() {
    let mut s = NormalizedString::from("abc…");
    s.nfkd();
    let transforms = vec![('a', -2), ('.', 0), ('.', 0), ('.', 0)];
    s.transform(transforms, 0);
    s.lowercase();
    assert_eq!(s.get(), "a...");
}
}
tokenizers/tokenizers/src/tokenizer/normalizer.rs/0
{ "file_path": "tokenizers/tokenizers/src/tokenizer/normalizer.rs", "repo_id": "tokenizers", "token_count": 42416 }
279
// Integration tests whose bodies double as the documentation code snippets:
// the `// START <name>` / `// END <name>` markers delimit regions extracted
// into the docs, so no extra comments are added inside those regions.
// Several tests depend on files under `data/` produced by the slow,
// `#[ignore]`d training tests.
use tokenizers::models::bpe::{BpeTrainerBuilder, BPE};
use tokenizers::normalizers::{Sequence, Strip, NFC};
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokenizers::{AddedToken, TokenizerBuilder};
use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper};
use tokenizers::{Tokenizer, TokenizerImpl};

/// Trains a small byte-level BPE tokenizer on `data/small.txt` and saves it
/// to `data/tokenizer.json`.
#[test]
fn train_tokenizer() {
    let vocab_size: usize = 100;
    let mut tokenizer = TokenizerBuilder::new()
        .with_model(BPE::default())
        .with_normalizer(Some(Sequence::new(vec![
            Strip::new(true, true).into(),
            NFC.into(),
        ])))
        .with_pre_tokenizer(Some(ByteLevel::default()))
        .with_post_processor(Some(ByteLevel::default()))
        .with_decoder(Some(ByteLevel::default()))
        .build()
        .unwrap();

    let mut trainer = BpeTrainerBuilder::new()
        .show_progress(false)
        .vocab_size(vocab_size)
        .min_frequency(0)
        .special_tokens(vec![
            AddedToken::from(String::from("<s>"), true),
            AddedToken::from(String::from("<pad>"), true),
            AddedToken::from(String::from("</s>"), true),
            AddedToken::from(String::from("<unk>"), true),
            AddedToken::from(String::from("<mask>"), true),
        ])
        .build();

    let pretty = true;
    tokenizer
        .train_from_files(&mut trainer, vec!["data/small.txt".to_string()])
        .unwrap()
        .save("data/tokenizer.json", pretty)
        .unwrap();
}

/// Round-trips a fixed sentence through the pretrained `data/roberta.json`
/// tokenizer: encode -> exact ids/tokens -> decode back to the input.
#[test]
fn load_tokenizer() {
    let tokenizer = Tokenizer::from_file("data/roberta.json").unwrap();

    let example = "This is an example";
    let ids = vec![713, 16, 41, 1246];
    let tokens = vec!["This", "Ġis", "Ġan", "Ġexample"];

    let encodings = tokenizer.encode(example, false).unwrap();
    assert_eq!(encodings.get_ids(), ids);
    assert_eq!(encodings.get_tokens(), tokens);

    let decoded = tokenizer.decode(&ids, false).unwrap();
    assert_eq!(decoded, example);
}

/// Slow quicktour training run (needs the wikitext-103 raw dumps); produces
/// `data/tokenizer-wiki.json` consumed by `quicktour` and `pipeline` below.
#[test]
#[ignore]
fn quicktour_slow_train() -> tokenizers::Result<()> {
    // START quicktour_init_tokenizer
    use tokenizers::models::bpe::BPE;

    let mut tokenizer: TokenizerImpl<
        BPE,
        NormalizerWrapper,
        PreTokenizerWrapper,
        PostProcessorWrapper,
        DecoderWrapper,
    > = TokenizerImpl::new(
        BPE::builder()
            .unk_token("[UNK]".to_string())
            .build()
            .unwrap(),
    );
    // END quicktour_init_tokenizer
    // START quicktour_init_trainer
    use tokenizers::models::bpe::BpeTrainer;

    let mut trainer = BpeTrainer::builder()
        .special_tokens(vec![
            AddedToken::from("[UNK]", true),
            AddedToken::from("[CLS]", true),
            AddedToken::from("[SEP]", true),
            AddedToken::from("[PAD]", true),
            AddedToken::from("[MASK]", true),
        ])
        .build();
    // END quicktour_init_trainer
    // START quicktour_init_pretok
    use tokenizers::pre_tokenizers::whitespace::Whitespace;

    tokenizer.with_pre_tokenizer(Some(Whitespace {}));
    // END quicktour_init_pretok

    // START quicktour_train
    let files = vec![
        "data/wikitext-103-raw/wiki.train.raw".into(),
        "data/wikitext-103-raw/wiki.test.raw".into(),
        "data/wikitext-103-raw/wiki.valid.raw".into(),
    ];
    tokenizer.train_from_files(&mut trainer, files)?;
    // END quicktour_train
    // START quicktour_save
    tokenizer.save("data/tokenizer-wiki.json", false)?;
    // END quicktour_save

    Ok(())
}

/// Quicktour documentation snippets: encoding, offsets, template processing,
/// batch encoding and padding against `data/tokenizer-wiki.json`.
#[test]
fn quicktour() -> tokenizers::Result<()> {
    // START quicktour_reload_tokenizer
    let mut tokenizer = Tokenizer::from_file("data/tokenizer-wiki.json")?;
    // END quicktour_reload_tokenizer
    // START quicktour_encode
    let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?;
    // END quicktour_encode
    // START quicktour_print_tokens
    println!("{:?}", output.get_tokens());
    // ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",]
    // END quicktour_print_tokens
    assert_eq!(
        output.get_tokens(),
        ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",]
    );

    // START quicktour_print_ids
    println!("{:?}", output.get_ids());
    // [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
    // END quicktour_print_ids
    assert_eq!(
        output.get_ids(),
        [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
    );

    // START quicktour_print_offsets
    println!("{:?}", output.get_offsets()[9]);
    // (26, 30)
    // END quicktour_print_offsets
    assert_eq!(output.get_offsets()[9], (26, 30));

    // START quicktour_use_offsets
    let sentence = "Hello, y'all! How are you 😁 ?";
    println!("{}", &sentence[26..30]);
    // "😁"
    // END quicktour_use_offsets

    // START quicktour_check_sep
    println!("{}", tokenizer.token_to_id("[SEP]").unwrap());
    // 2
    // END quicktour_check_sep
    assert_eq!(tokenizer.token_to_id("[SEP]"), Some(2));

    // START quicktour_init_template_processing
    use tokenizers::processors::template::TemplateProcessing;

    let special_tokens = vec![
        ("[CLS]", tokenizer.token_to_id("[CLS]").unwrap()),
        ("[SEP]", tokenizer.token_to_id("[SEP]").unwrap()),
    ];
    tokenizer.with_post_processor(Some(
        TemplateProcessing::builder()
            .try_single("[CLS] $A [SEP]")
            .unwrap()
            .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1")
            .unwrap()
            .special_tokens(special_tokens)
            .build()?,
    ));
    // END quicktour_init_template_processing
    // START quicktour_print_special_tokens
    let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?;
    println!("{:?}", output.get_tokens());
    // ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
    // END quicktour_print_special_tokens
    assert_eq!(
        output.get_tokens(),
        ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
    );

    // START quicktour_print_special_tokens_pair
    let output = tokenizer.encode(("Hello, y'all!", "How are you 😁 ?"), true)?;
    println!("{:?}", output.get_tokens());
    // ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
    // END quicktour_print_special_tokens_pair
    assert_eq!(
        output.get_tokens(),
        [
            "[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]",
            "?", "[SEP]"
        ]
    );

    // START quicktour_print_type_ids
    println!("{:?}", output.get_type_ids());
    // [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
    // END quicktour_print_type_ids
    assert_eq!(
        output.get_type_ids(),
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
    );

    // START quicktour_encode_batch
    let output = tokenizer.encode_batch(vec!["Hello, y'all!", "How are you 😁 ?"], true)?;
    // END quicktour_encode_batch
    println!("{:?}", output);
    // START quicktour_encode_batch_pair
    let output = tokenizer.encode_batch(
        vec![
            ("Hello, y'all!", "How are you 😁 ?"),
            ("Hello to you too!", "I'm fine, thank you!"),
        ],
        true,
    )?;
    // END quicktour_encode_batch_pair
    println!("{:?}", output);
    // START quicktour_enable_padding
    use tokenizers::PaddingParams;

    tokenizer.with_padding(Some(PaddingParams {
        pad_id: 3,
        pad_token: "[PAD]".to_string(),
        ..PaddingParams::default()
    }));
    // END quicktour_enable_padding
    // START quicktour_print_batch_tokens
    let output = tokenizer.encode_batch(vec!["Hello, y'all!", "How are you 😁 ?"], true)?;
    println!("{:?}", output[1].get_tokens());
    // ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
    // END quicktour_print_batch_tokens
    assert_eq!(
        output[1].get_tokens(),
        ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
    );

    // START quicktour_print_attention_mask
    println!("{:?}", output[1].get_attention_mask());
    // [1, 1, 1, 1, 1, 1, 1, 0]
    // END quicktour_print_attention_mask
    assert_eq!(output[1].get_attention_mask(), [1, 1, 1, 1, 1, 1, 1, 0]);
    Ok(())
}

/// Pipeline documentation snippets: normalizer, pre-tokenizers (including a
/// `Sequence` with `Digits`), template post-processor and decoding.
#[test]
fn pipeline() -> tokenizers::Result<()> {
    // START pipeline_reload_tokenizer
    use tokenizers::Tokenizer;

    let mut tokenizer = Tokenizer::from_file("data/tokenizer-wiki.json")?;
    // END pipeline_reload_tokenizer
    // START pipeline_setup_normalizer
    use tokenizers::normalizers::{
        strip::StripAccents, unicode::NFD, utils::Sequence as NormalizerSequence,
    };

    let normalizer = NormalizerSequence::new(vec![NFD.into(), StripAccents.into()]);
    // END pipeline_setup_normalizer
    // START pipeline_test_normalizer
    use tokenizers::{NormalizedString, Normalizer};

    let mut normalized = NormalizedString::from("Héllò hôw are ü?");
    normalizer.normalize(&mut normalized)?;

    println!("{}", normalized.get());
    // "Hello how are u?"
    // END pipeline_test_normalizer
    assert_eq!(normalized.get(), "Hello how are u?");
    // START pipeline_replace_normalizer
    tokenizer.with_normalizer(Some(normalizer));
    // END pipeline_replace_normalizer
    // START pipeline_setup_pre_tokenizer
    use tokenizers::pre_tokenizers::whitespace::Whitespace;
    use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};

    let pre_tokenizer = Whitespace {};
    let mut pre_tokenized = PreTokenizedString::from("Hello! How are you? I'm fine, thank you.");

    pre_tokenizer.pre_tokenize(&mut pre_tokenized)?;

    println!(
        "{:?}",
        pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte)
    );
    // [("Hello", (0, 5), None), ("!", (5, 6), None), ("How", (7, 10), None),
    // ("are", (11, 14), None), ("you", (15, 18), None), ("?", (18, 19), None),
    // ("I", (20, 21), None), ("\'", (21, 22), None), ("m", (22, 23), None),
    // ("fine", (24, 28), None), (",", (28, 29), None), ("thank", (30, 35), None),
    // ("you", (36, 39), None), (".", (39, 40), None)]
    // END pipeline_setup_pre_tokenizer
    assert_eq!(
        pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte),
        vec![
            ("Hello", (0, 5), &None),
            ("!", (5, 6), &None),
            ("How", (7, 10), &None),
            ("are", (11, 14), &None),
            ("you", (15, 18), &None),
            ("?", (18, 19), &None),
            ("I", (20, 21), &None),
            ("\'", (21, 22), &None),
            ("m", (22, 23), &None),
            ("fine", (24, 28), &None),
            (",", (28, 29), &None),
            ("thank", (30, 35), &None),
            ("you", (36, 39), &None),
            (".", (39, 40), &None)
        ]
    );
    // START pipeline_combine_pre_tokenizer
    use tokenizers::pre_tokenizers::{digits::Digits, sequence::Sequence};

    let pre_tokenizer = Sequence::new(vec![Whitespace {}.into(), Digits::new(true).into()]);
    let mut pre_tokenized = PreTokenizedString::from("Call 911!");

    pre_tokenizer.pre_tokenize(&mut pre_tokenized)?;

    println!(
        "{:?}",
        pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte)
    );
    // END pipeline_combine_pre_tokenizer
    assert_eq!(
        pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte),
        vec![
            ("Call", (0, 4), &None),
            ("9", (5, 6), &None),
            ("1", (6, 7), &None),
            ("1", (7, 8), &None),
            ("!", (8, 9), &None)
        ]
    );
    // START pipeline_replace_pre_tokenizer
    tokenizer.with_pre_tokenizer(Some(pre_tokenizer));
    // END pipeline_replace_pre_tokenizer
    // START pipeline_setup_processor
    use tokenizers::processors::template::TemplateProcessing;

    tokenizer.with_post_processor(Some(
        TemplateProcessing::builder()
            .try_single("[CLS] $A [SEP]")
            .unwrap()
            .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1")
            .unwrap()
            .special_tokens(vec![("[CLS]", 1), ("[SEP]", 2)])
            .build()
            .unwrap(),
    ));
    // END pipeline_setup_processor
    // START pipeline_test_decoding
    let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?;
    println!("{:?}", output.get_ids());
    // [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]

    let decoded = tokenizer.decode(
        &[1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2],
        true,
    )?;
    println!("{}", decoded);
    // "Hello , y ' all ! How are you ?"
    // END pipeline_test_decoding

    Ok(())
}

/// Slow BERT-style WordPiece training run (needs the wikitext-103 raw dumps);
/// produces `data/bert-wiki.json` consumed by `pipeline_bert` below.
#[test]
#[ignore]
fn train_pipeline_bert() -> tokenizers::Result<()> {
    // START bert_setup_tokenizer
    use tokenizers::models::wordpiece::WordPiece;
    use tokenizers::Tokenizer;

    let mut bert_tokenizer = Tokenizer::new(
        WordPiece::builder()
            .unk_token("[UNK]".to_string())
            .build()
            .unwrap(),
    );
    // END bert_setup_tokenizer
    // START bert_setup_normalizer
    use tokenizers::normalizers::utils::Sequence as NormalizerSequence;
    use tokenizers::normalizers::{strip::StripAccents, unicode::NFD, utils::Lowercase};

    bert_tokenizer.with_normalizer(Some(NormalizerSequence::new(vec![
        NFD.into(),
        Lowercase.into(),
        StripAccents.into(),
    ])));
    // END bert_setup_normalizer
    // START bert_setup_pre_tokenizer
    use tokenizers::pre_tokenizers::whitespace::Whitespace;

    bert_tokenizer.with_pre_tokenizer(Some(Whitespace {}));
    // END bert_setup_pre_tokenizer
    // START bert_setup_processor
    use tokenizers::processors::template::TemplateProcessing;

    bert_tokenizer.with_post_processor(Some(
        TemplateProcessing::builder()
            .try_single("[CLS] $A [SEP]")
            .unwrap()
            .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1")
            .unwrap()
            .special_tokens(vec![("[CLS]", 1), ("[SEP]", 2)])
            .build()
            .unwrap(),
    ));
    // END bert_setup_processor
    // START bert_train_tokenizer
    use tokenizers::models::{wordpiece::WordPieceTrainer, TrainerWrapper};

    let mut trainer: TrainerWrapper = WordPieceTrainer::builder()
        .vocab_size(30_522)
        .special_tokens(vec![
            AddedToken::from("[UNK]", true),
            AddedToken::from("[CLS]", true),
            AddedToken::from("[SEP]", true),
            AddedToken::from("[PAD]", true),
            AddedToken::from("[MASK]", true),
        ])
        .build()
        .into();

    let files = vec![
        "data/wikitext-103-raw/wiki.train.raw".into(),
        "data/wikitext-103-raw/wiki.test.raw".into(),
        "data/wikitext-103-raw/wiki.valid.raw".into(),
    ];
    bert_tokenizer.train_from_files(&mut trainer, files)?;
    bert_tokenizer.save("data/bert-wiki.json", false)?;
    // END bert_train_tokenizer

    Ok(())
}

/// BERT pipeline decoding snippets: raw decode keeps the `##` continuation
/// prefixes; installing the WordPiece decoder merges them back into words.
#[test]
fn pipeline_bert() -> tokenizers::Result<()> {
    let mut bert_tokenizer = Tokenizer::from_file("data/bert-wiki.json")?;

    // START bert_test_decoding
    let output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.", true)?;
    println!("{:?}", output.get_tokens());
    // ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]

    let decoded = bert_tokenizer.decode(output.get_ids(), true)?;
    println!("{}", decoded);
    // "welcome to the tok ##eni ##zer ##s library ."
    // END bert_test_decoding
    assert_eq!(
        output.get_tokens(),
        &[
            "[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library",
            ".", "[SEP]"
        ]
    );
    assert_eq!(decoded, "welcome to the tok ##eni ##zer ##s library .");
    // START bert_proper_decoding
    use tokenizers::decoders::wordpiece::WordPiece as WordPieceDecoder;

    bert_tokenizer.with_decoder(Some(WordPieceDecoder::default()));
    let decoded = bert_tokenizer.decode(output.get_ids(), true)?;
    // "welcome to the tokenizers library."
    // END bert_proper_decoding
    assert_eq!(decoded, "welcome to the tokenizers library.");

    Ok(())
}
tokenizers/tokenizers/tests/documentation.rs/0
{ "file_path": "tokenizers/tokenizers/tests/documentation.rs", "repo_id": "tokenizers", "token_count": 7422 }
280
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Run benchmark using the `optimum-benchmark` library with some customization in `transformers`.

Assume we are under `transformers` root directory: (make sure the commits are valid commits)
```bash
python benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=9b9c7f03da625b13643e99205c691fe046461724 --metrics=decode.latency.mean,per_token.latency.mean,per_token.throughput.value backend.model=google/gemma-2b benchmark.input_shapes.sequence_length=5,7 benchmark.input_shapes.batch_size=1,2 --multirun
```
"""

import argparse
import glob
import json
import os.path
import re
import tempfile
from contextlib import contextmanager
from pathlib import Path

from git import Repo
from huggingface_hub import HfApi
from optimum_benchmark import Benchmark
from optimum_benchmark_wrapper import main


PATH_TO_REPO = Path(__file__).parent.parent.resolve()


@contextmanager
def checkout_commit(repo: Repo, commit_id: str):
    """
    Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.

    Args:
        repo (`git.Repo`): A git repository (for instance the Transformers repo).
        commit_id (`str`): The commit reference to checkout inside the context manager.
    """
    # Remember where we were: a branch reference when on a branch, otherwise the bare commit.
    current_head = repo.head.commit if repo.head.is_detached else repo.head.ref

    try:
        repo.git.checkout(commit_id)
        yield
    finally:
        # Always restore the original checkout, even if the body raised.
        repo.git.checkout(current_head)


def summarize(run_dir, metrics, expand_metrics=False):
    """Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`.

    Each summary's format is as follows (for `expand_metrics=False`):
    ```
    {
        "model": "google/gemma-2b",
        "commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7",
        "config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5",
        "metrics": {
            "decode.latency.mean": 1.624666809082031,
            "per_token.latency.mean": 0.012843788806628804,
            "per_token.throughput.value": 77.85864553330948
        }
    }
    ```

    Args:
        run_dir (`str`): Directory under which to (recursively) look for `benchmark_report.json` files.
        metrics (`list[str]`): Dotted metric paths (e.g. `"decode.latency.mean"`) to extract from each report.
        expand_metrics (`bool`, *optional*, defaults to `False`):
            If `True`, store the metrics as a nested dict mirroring the dotted paths instead of flat keys.

    Returns:
        `list[dict]`: One summary dict per job directory; each is also written to `<job dir>/summary.json`.
    """
    reports = glob.glob(os.path.join(run_dir, "**/benchmark_report.json"), recursive=True)
    report_dirs = [str(Path(report).parent) for report in reports]

    summaries = []
    for report_dir in report_dirs:
        # Job directories are created as `.../commit\=<sha>/...` by the main loop below.
        commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0]

        if not os.path.isfile(os.path.join(report_dir, "benchmark.json")):
            continue
        benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json"))
        report = benchmark.report

        model = benchmark.config.backend["model"]

        # This looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`.
        # (we rely on the usage of hydra's `${hydra.job.override_dirname}`.)
        # `re.escape` so that regex metacharacters in a model name can't corrupt the pattern.
        benchmark_name = re.sub(rf"backend\.model={re.escape(model)},*", "", report_dir)
        benchmark_name = str(Path(benchmark_name).parts[-1])
        if benchmark_name.startswith("commit="):
            benchmark_name = benchmark.config.name

        metrics_values = {}
        # post-processing of report: show a few selected/important metric
        for metric in metrics:
            keys = metric.split(".")
            value = report.to_dict()
            current = metrics_values
            found = True
            for key in keys:
                if key not in value:
                    # Previously a missing key only `continue`d the inner loop, so a typo'd
                    # metric silently recorded the stale intermediate dict as its value.
                    print(f"Warning: `{metric}` not found in the report (missing key `{key}`). Skipping it.")
                    found = False
                    break
                value = value[key]
                if expand_metrics:
                    if isinstance(value, dict):
                        # Descend (creating the level if needed) to mirror the dotted path.
                        current = current.setdefault(key, {})
                    else:
                        current[key] = value
            if not found:
                continue
            if not expand_metrics:
                metrics_values[metric] = value

        # show some config information
        print(f"model: {model}")
        print(f"commit: {commit}")
        print(f"config: {benchmark_name}")
        if metrics_values:
            print("metrics:")
            if expand_metrics:
                print(metrics_values)
            else:
                for metric, value in metrics_values.items():
                    print(f" - {metric}: {value}")
        print("-" * 80)

        summary = {
            "model": model,
            "commit": commit,
            "config": benchmark_name,
            "metrics": metrics_values,
        }
        summaries.append(summary)

        with open(os.path.join(report_dir, "summary.json"), "w") as fp:
            json.dump(summary, fp, indent=4)

    return summaries


def combine_summaries(summaries):
    """Combine a list of summary obtained from the function `summarize`.

    The combined summary's format is as follows:
    ```
    "google/gemma-2b": {
        "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": {
            "3cd6ed22e4d49219f300f5055e71e3929aba20d7": {
                "metrics": {"decode.latency.mean": 1.624666809082031}
            },
            "c97ee28b117c0abe8e08891f402065e4df6d72aa": {
                "metrics": {"decode.latency.mean": 1.6278163452148438}
            }
        },
        "benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": {
            "3cd6ed22e4d49219f300f5055e71e3929aba20d7": {
                "metrics": {"decode.latency.mean": 1.6947791748046876}
            },
            "c97ee28b117c0abe8e08891f402065e4df6d72aa": {
                "metrics": {"decode.latency.mean": 1.6980519409179688}
            }
        }
    }
    ```

    Args:
        summaries (`list[dict]`): Summaries as returned by `summarize`.

    Returns:
        `dict`: The summaries nested by model -> config -> commit.

    NOTE(review): this relies on the module-level `exp_run_dir` set in the `__main__`
    block (callers only invoke it when `exp_run_dir is not None`); kept as-is to
    preserve the existing interface.
    """
    combined = {}
    for summary in summaries:
        model = summary["model"]
        config = summary["config"]
        commit = summary["commit"]

        # Build the nesting level by level; the first summary seen for a
        # (model, config, commit) triple wins, matching the original behavior.
        if model not in combined:
            combined[model] = {}
        if config not in combined[model]:
            combined[model][config] = {}
        if commit not in combined[model][config]:
            combined[model][config][commit] = {"metrics": summary["metrics"]}

    with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp:
        json.dump(combined, fp, indent=4)

    print(json.dumps(combined, indent=4))

    return combined


if __name__ == "__main__":

    def list_str(values):
        # argparse `type` helper: split a comma-separated option into a list.
        return values.split(",")

    def str_to_bool(value):
        # `type=bool` would treat any non-empty string (including "False") as True;
        # parse the usual spellings explicitly instead.
        if isinstance(value, bool):
            return value
        return value.lower() not in {"false", "0", "no", "n", "off"}

    parser = argparse.ArgumentParser()

    parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.")
    parser.add_argument("--config-name", type=str, required=True, help="The config name.")

    # arguments specific to this wrapper for our own customization
    parser.add_argument("--ensure_empty", type=str_to_bool, default=True, help="If to create a temporary directory.")
    parser.add_argument(
        "--commit",
        type=list_str,
        default="",
        help="Comma-separated list of branch names and/or commit sha values on which the benchmark will run. If `diff` is specified, it will run on both the current head and the `main` branch.",
    )
    parser.add_argument("--metrics", type=str, help="The metrics to be included in the summary.")

    parser.add_argument("--repo_id", type=str, default=None, help="The repository to which the file will be uploaded.")
    parser.add_argument("--path_in_repo", type=str, default=None, help="Relative filepath in the repo.")
    parser.add_argument("--token", type=str, default=None, help="A valid user access token (string).")

    args, optimum_benchmark_args = parser.parse_known_args()

    repo = Repo(PATH_TO_REPO)

    # Default metric selection, overridable via --metrics.
    metrics = [
        "prefill.latency.mean",
        "prefill.throughput.value",
        "decode.latency.mean",
        "decode.throughput.value",
        "per_token.latency.mean",
        "per_token.throughput.value",
    ]
    if args.metrics is not None:
        metrics = args.metrics.split(",")

    # Get `backend.model` in a hacky way: We want to control the experiment flow manually.
    models = [""]
    for idx, arg in enumerate(optimum_benchmark_args):
        if arg.startswith("backend.model="):
            models = arg[len("backend.model=") :]
            models = models.split(",")
            break
    optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith("backend.model=")]

    # Get the commit(s)
    current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref)
    commits = [x for x in args.commit if x != ""]
    if len(commits) == 0:
        commits = [current_head]
    elif len(commits) == 1 and commits[0] == "diff":
        # compare to `main`
        commits = ["main", current_head]

    # Get the specified run directory: hydra uses `hydra.sweep.dir` for
    # `--multirun` jobs and `hydra.run.dir` otherwise.
    run_dir_arg_idx, run_dir = -1, None
    sweep_dir_arg_idx, sweep_dir = -1, None
    for idx, arg in enumerate(optimum_benchmark_args):
        if arg.startswith("hydra.run.dir="):
            run_dir = arg[len("hydra.run.dir=") :]
            run_dir_arg_idx = idx
        elif arg.startswith("hydra.sweep.dir="):
            sweep_dir = arg[len("hydra.sweep.dir=") :]
            sweep_dir_arg_idx = idx
    exp_run_dir, dir_arg_idx, arg_name = (
        (sweep_dir, sweep_dir_arg_idx, "hydra.sweep.dir")
        if "--multirun" in optimum_benchmark_args
        else (run_dir, run_dir_arg_idx, "hydra.run.dir")
    )

    # TODO: not hardcoded
    if exp_run_dir is None and args.ensure_empty:
        exp_run_dir = "_benchmark"

    if args.ensure_empty:
        # Run inside a fresh temporary sub-directory so previous outputs can't leak in.
        os.makedirs(exp_run_dir, exist_ok=True)
        exp_run_dir = tempfile.mkdtemp(dir=exp_run_dir)

    run_summaries = []
    for commit in commits:
        with checkout_commit(repo, commit):
            # Resolve branch names to the actual sha we are benchmarking.
            commit = str(repo.head.commit)

            commit_run_dir = exp_run_dir
            if exp_run_dir is not None:
                # `\=` keeps hydra from interpreting the `=` in the directory name.
                commit_run_dir = os.path.join(exp_run_dir, rf"commit\={commit}")

            print(f"Run benchmark on commit: {commit}")

            for model in models:
                model_arg = [f"backend.model={model}"] if model != "" else []
                dir_args = []
                if commit_run_dir is not None:
                    if dir_arg_idx > -1:
                        # The user already passed a run/sweep dir: redirect it per commit.
                        optimum_benchmark_args[dir_arg_idx] = f"{arg_name}={commit_run_dir}"
                    else:
                        dir_args = [
                            f"hydra.sweep.dir={commit_run_dir}",
                            f"hydra.run.dir={commit_run_dir}/" + "${hydra.job.override_dirname}",
                        ]
                main(args.config_dir, args.config_name, model_arg + dir_args + optimum_benchmark_args)

            if commit_run_dir is not None:
                # Need to remove the `\` character
                summaries = summarize(commit_run_dir.replace("\\", ""), metrics)
                run_summaries.extend(summaries)

    # aggregate the information across the commits
    if exp_run_dir is not None:
        with open(os.path.join(exp_run_dir, "summaries.json"), "w") as fp:
            json.dump(run_summaries, fp, indent=4)

        combined_summary = combine_summaries(run_summaries)

        if args.repo_id is not None and args.path_in_repo is not None:
            # Upload to Hub
            api = HfApi()
            api.upload_folder(
                folder_path=exp_run_dir,
                path_in_repo=args.path_in_repo,
                repo_id=args.repo_id,
                repo_type="dataset",
                token=args.token,
            )
transformers/benchmark/benchmark.py/0
{ "file_path": "transformers/benchmark/benchmark.py", "repo_id": "transformers", "token_count": 5440 }
281
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Generating the documentation

To generate the documentation, you first have to build it. Several packages are necessary to build the doc; you can install them with the following command, at the root of the code repository:

```bash
pip install -e ".[docs]"
```

Then you need to install our special tool that builds the documentation:

```bash
pip install git+https://github.com/huggingface/doc-builder
```

---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to check how they look before committing for instance). You don't have to commit the built documentation.

---

## Building the documentation

Once you have set up the `doc-builder` and additional packages, you can generate the documentation by typing the following command:

```bash
doc-builder build transformers docs/source/en/ --build_dir ~/tmp/test-build
```

You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite Markdown editor.
## Previewing the documentation

To preview the docs, first install the `watchdog` module with:

```bash
pip install watchdog
```

Then run the following command:

```bash
doc-builder preview {package_name} {path_to_docs}
```

For example:

```bash
doc-builder preview transformers docs/source/en/
```

The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.

---
**NOTE**

The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).

---

## Adding a new element to the navigation bar

Accepted files are Markdown (.md).

Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/transformers/blob/main/docs/source/en/_toctree.yml) file.

## Renaming section headers and moving sections

It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media, and it'd make for a much better user experience if users reading those months later could still easily navigate to the originally intended information.

Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:

```
Sections that were moved:

[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```

and of course, if you moved it to another file, then:

```
Sections that were moved:

[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```

Use the relative style to link to the new file so that the versioned docs continue to work.

For an example of a rich moved section set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).

## Writing Documentation - Specification

The `huggingface/transformers` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
although we can write them directly in Markdown.

### Adding a new tutorial

Adding a new tutorial or section is done in two steps:

- Add a new file under `./source`. This file should be in Markdown (.md) format.
- Link that file in `./source/_toctree.yml` on the correct toc-tree.

Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or four.

### Translating

When translating, refer to the guide at [./TRANSLATING.md](https://github.com/huggingface/transformers/blob/main/docs/TRANSLATING.md).

### Adding a new model

When adding a new model:

- Create a file `xxx.md` under `./source/model_doc` (don't hesitate to copy an existing file as template).
- Link that file in `./source/_toctree.yml`.
- Write a short overview of the model:
    - Overview with paper & authors
    - Paper abstract
    - Tips and tricks and how to use it best
- Add the classes that should be linked in the model.
This generally includes the configuration, the tokenizer, and every model of that class (the base model, alongside models with additional heads), both in PyTorch and TensorFlow. The order is generally: - Configuration - Tokenizer - PyTorch base model - PyTorch head models - TensorFlow base model - TensorFlow head models - Flax base model - Flax head models These classes should be added using our Markdown syntax. Usually as follows: ``` ## XXXConfig [[autodoc]] XXXConfig ``` This will include every public method of the configuration that is documented. If for some reason you wish for a method not to be displayed in the documentation, you can do so by specifying which methods should be in the docs: ``` ## XXXTokenizer [[autodoc]] XXXTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ``` If you just want to add a method that is not documented (for instance magic methods like `__call__` are not documented by default) you can put the list of methods to add in a list that contains `all`: ``` ## XXXTokenizer [[autodoc]] XXXTokenizer - all - __call__ ``` ### Writing source documentation Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names and objects like True, None, or any strings should usually be put in `code`. When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or function to be in the main package. If you want to create a link to some internal class or function, you need to provide its path. For instance: \[\`utils.ModelOutput\`\]. This will be converted into a link with `utils.ModelOutput` in the description. 
To get rid of the path and only keep the name of the object you are linking to in the description, add a ~: \[\`~utils.ModelOutput\`\] will generate a link with `ModelOutput` in the description. The same works for methods so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\]. #### Defining arguments in a method Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its description: ``` Args: n_layers (`int`): The number of layers of the model. ``` If the description is too long to fit in one line, another indentation is necessary before writing the description after the argument. Here's an example showcasing everything so far: ``` Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and [`~PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) ``` For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the following signature: ``` def my_function(x: str = None, a: float = 1): ``` then its documentation should look like this: ``` Args: x (`str`, *optional*): This argument controls ... a (`float`, *optional*, defaults to 1): This argument is used to ... ``` Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even if the first line describing your argument type and its default gets long, you can't break it on several lines. You can however, write as many lines as you want in the indented description (see the example above with `input_ids`). #### Writing a multi-line code block Multi-line code blocks can be useful for displaying examples. 
They are done between two lines of three backticks as usual in Markdown:

````
```
# first line of code
# second line
# etc
```
````

We follow the [doctest](https://docs.python.org/3/library/doctest.html) syntax for the examples to automatically test the results to stay consistent with the library.

#### Writing a return block

The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements building the return.

Here's an example of a single value return:

```
    Returns:
        `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```

Here's an example of a tuple return, comprising several objects:

```
    Returns:
        `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
        - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
          Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
          Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```

#### Adding an image

Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset.

## Styling the docstring

We have an automatic script running with the `make style` command that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library

This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's recommended to commit your changes before running `make style`, so you can revert the changes done by that script easily.

# Testing documentation examples

Good documentation often comes with an example of how a specific function or class should be used.
Each model class should contain at least one example showcasing
how to use this model class in inference. *E.g.* the class [Wav2Vec2ForCTC](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC)
includes an example of how to transcribe speech to text in the
[docstring of its forward function](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC.forward).

## Writing documentation examples

The syntax for Example docstrings can look as follows:

```
    Example:

    ```python
    >>> from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    >>> model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

    >>> # audio file is decoded on the fly
    >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")

    >>> with torch.no_grad():
    ...
logits = model(**inputs).logits >>> predicted_ids = torch.argmax(logits, dim=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] 'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL' ``` ``` The docstring should give a minimal, clear example of how the respective model is to be used in inference and also include the expected (ideally sensible) output. Often, readers will try out the example before even going through the function or class definitions. Therefore, it is of utmost importance that the example works as expected. ## Docstring testing To do so each example should be included in the doctests. We use pytests' [doctest integration](https://docs.pytest.org/doctest.html) to verify that all of our examples run correctly. For Transformers, the doctests are run on a daily basis via GitHub Actions as can be seen [here](https://github.com/huggingface/transformers/actions/workflows/doctests.yml). ### For Python files Run all the tests in the docstrings of a given file with the following command, here is how we test the modeling file of Wav2Vec2 for instance: ```bash pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py -sv --doctest-continue-on-failure ``` If you want to isolate a specific docstring, just add `::` after the file name then type the whole path of the function/class/method whose docstring you want to test. 
For instance, here is how to just test the forward method of `Wav2Vec2ForCTC`:

```bash
pytest --doctest-modules src/transformers/models/wav2vec2/modeling_wav2vec2.py::transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC.forward -sv --doctest-continue-on-failure
```

### For Markdown files

You can test a given file locally with this command (here testing the quicktour):

```bash
pytest --doctest-modules docs/source/quicktour.md -sv --doctest-continue-on-failure --doctest-glob="*.md"
```

### Writing doctests

Here are a few tips to help you debug the doctests and make them pass:

- The outputs of the code need to match the expected output **exactly**, so make sure you have the same outputs. In particular doctest will see a difference between single quotes and double quotes, or a missing parenthesis. The only exceptions to that rule are:
  * whitespace: a single whitespace character (space, tabulation, new line) is equivalent to any number of whitespace, so you can add new lines where there are spaces to make your output more readable.
  * numerical values: you should never put more than 4 or 5 digits to expected results as different setups or library versions might get you slightly different results. `doctest` is configured to ignore any difference lower than the precision to which you wrote (so 1e-4 if you write 4 digits).
- Don't leave a block of code that is very long to execute. If you can't make it fast, you can either not use the doctest syntax on it (so that it's ignored), or if you want to use the doctest syntax to show the results, you can add a comment `# doctest: +SKIP` at the end of the lines of code too long to execute
- Each line of code that produces a result needs to have that result written below. You can ignore an output if you don't want to show it in your code example by adding a comment ` # doctest: +IGNORE_RESULT` at the end of the line of code producing it.
transformers/docs/README.md/0
{ "file_path": "transformers/docs/README.md", "repo_id": "transformers", "token_count": 4835 }
282
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Überprüfungen bei einer Pull-Anfrage Wenn Sie eine Pull-Anfrage für 🤗 Transformers öffnen, wird eine ganze Reihe von Prüfungen durchgeführt, um sicherzustellen, dass der Patch, den Sie hinzufügen, nichts Bestehendes zerstört. Es gibt vier Arten von Prüfungen: - reguläre Tests - Erstellung der Dokumentation - Stil von Code und Dokumentation - allgemeine Konsistenz des Repository In diesem Dokument werden wir versuchen zu erklären, worum es sich bei diesen verschiedenen Prüfungen handelt und wie Sie sie lokal debuggen können, wenn eine der Prüfungen in Ihrer PR fehlschlägt. Beachten Sie, dass Sie im Idealfall eine Dev-Installation benötigen: ```bash pip install transformers[dev] ``` oder für eine bearbeitbare Installation: ```bash pip install -e .[dev] ``` innerhalb des Transformers Repo. Da die Anzahl der optionalen Abhängigkeiten von Transformers stark zugenommen hat, ist es möglich, dass Sie nicht alle davon bekommen können. Wenn die Dev-Installation fehlschlägt, stellen Sie sicher, dass Sie das Deep Learning-Framework, mit dem Sie arbeiten, installieren (PyTorch, TensorFlow und/oder Flax). 
```bash pip install transformers[quality] ``` oder für eine bearbeitbare Installation: ```bash pip install -e .[quality] ``` ## Tests Alle Jobs, die mit `ci/circleci: run_tests_` beginnen, führen Teile der Transformers-Testsuite aus. Jeder dieser Jobs konzentriert sich auf einen Teil der Bibliothek in einer bestimmten Umgebung: `ci/circleci: run_tests_pipelines_tf` zum Beispiel führt den Pipelines-Test in einer Umgebung aus, in der nur TensorFlow installiert ist. Beachten Sie, dass nur ein Teil der Testsuite jedes Mal ausgeführt wird, um zu vermeiden, dass Tests ausgeführt werden, wenn es keine wirkliche Änderung in den Modulen gibt, die sie testen: ein Dienstprogramm wird ausgeführt, um die Unterschiede in der Bibliothek zwischen vor und nach dem PR zu ermitteln (was GitHub Ihnen auf der Registerkarte "Files changes" anzeigt) und die Tests auszuwählen, die von diesem Unterschied betroffen sind. Dieses Dienstprogramm kann lokal mit ausgeführt werden: ```bash python utils/tests_fetcher.py ``` aus dem Stammverzeichnis des Transformers-Repositoriums. Es wird: 1. Überprüfen Sie für jede Datei im Diff, ob die Änderungen im Code oder nur in Kommentaren oder Docstrings enthalten sind. Nur die Dateien mit echten Codeänderungen werden beibehalten. 2. Erstellen Sie eine interne Map, die für jede Datei des Quellcodes der Bibliothek alle Dateien angibt, auf die sie rekursiv Einfluss nimmt. Von Modul A wird gesagt, dass es sich auf Modul B auswirkt, wenn Modul B Modul A importiert. Für die rekursive Auswirkung benötigen wir eine Kette von Modulen, die von Modul A zu Modul B führt und in der jedes Modul das vorherige importiert. 3. Wenden Sie diese Zuordnung auf die in Schritt 1 gesammelten Dateien an. So erhalten wir die Liste der Modelldateien, die von der PR betroffen sind. 4. Ordnen Sie jede dieser Dateien der/den entsprechenden Testdatei(en) zu und erhalten Sie die Liste der auszuführenden Tests. 
Wenn Sie das Skript lokal ausführen, sollten Sie die Ergebnisse von Schritt 1, 3 und 4 ausgegeben bekommen und somit wissen, welche Tests ausgeführt werden. Das Skript erstellt außerdem eine Datei namens `test_list.txt`, die die Liste der auszuführenden Tests enthält, die Sie mit dem folgenden Befehl lokal ausführen können: ```bash python -m pytest -n 8 --dist=loadfile -rA -s $(cat test_list.txt) ``` Für den Fall, dass Ihnen etwas entgangen ist, wird die komplette Testreihe ebenfalls täglich ausgeführt. ## Dokumentation erstellen Der Job `build_pr_documentation` erstellt und generiert eine Vorschau der Dokumentation, um sicherzustellen, dass alles in Ordnung ist, wenn Ihr PR zusammengeführt wird. Ein Bot fügt einen Link zur Vorschau der Dokumentation zu Ihrem PR hinzu. Alle Änderungen, die Sie an dem PR vornehmen, werden automatisch in der Vorschau aktualisiert. Wenn die Dokumentation nicht erstellt werden kann, klicken Sie auf **Details** neben dem fehlgeschlagenen Auftrag, um zu sehen, wo der Fehler liegt. Oft ist der Fehler so einfach wie eine fehlende Datei im `toctree`. Wenn Sie daran interessiert sind, die Dokumentation lokal zu erstellen oder in der Vorschau anzusehen, werfen Sie einen Blick in die [`README.md`](https://github.com/huggingface/transformers/tree/main/docs) im Ordner docs. ## Code und Dokumentationsstil Die Formatierung des Codes erfolgt für alle Quelldateien, die Beispiele und die Tests mit `black` und `ruff`. Wir haben auch ein benutzerdefiniertes Tool, das sich um die Formatierung von docstrings und `rst`-Dateien kümmert (`utils/style_doc.py`), sowie um die Reihenfolge der Lazy-Importe, die in den Transformers `__init__.py`-Dateien durchgeführt werden (`utils/custom_init_isort.py`). All dies können Sie starten, indem Sie Folgendes ausführen ```bash make style ``` Das CI prüft, ob diese innerhalb der Prüfung `ci/circleci: check_code_quality` angewendet wurden. 
Es führt auch `ruff` aus, das einen grundlegenden Blick auf Ihren Code wirft und sich beschwert, wenn es eine undefinierte Variable findet oder eine, die nicht verwendet wird. Um diese Prüfung lokal auszuführen, verwenden Sie ```bash make quality ``` Dies kann sehr viel Zeit in Anspruch nehmen. Um dasselbe nur für die Dateien zu tun, die Sie im aktuellen Zweig geändert haben, führen Sie ```bash make fixup ``` Dieser letzte Befehl führt auch alle zusätzlichen Prüfungen für die Konsistenz des Repositorys durch. Schauen wir uns diese an. ## Repository-Konsistenz Dies fasst alle Tests zusammen, die sicherstellen, dass Ihr PR das Repository in einem guten Zustand verlässt. Sie können diese Prüfung lokal durchführen, indem Sie Folgendes ausführen: ```bash make repo-consistency ``` Dies überprüft, ob: - Alle zum Init hinzugefügten Objekte sind dokumentiert (ausgeführt von `utils/check_repo.py`) - Alle `__init__.py`-Dateien haben in ihren beiden Abschnitten den gleichen Inhalt (ausgeführt von `utils/check_inits.py`) - Der gesamte Code, der als Kopie eines anderen Moduls identifiziert wurde, stimmt mit dem Original überein (ausgeführt von `utils/check_copies.py`) - Alle Konfigurationsklassen haben mindestens einen gültigen Prüfpunkt, der in ihren Dokumentationen erwähnt wird (ausgeführt von `utils/check_config_docstrings.py`) - Alle Konfigurationsklassen enthalten nur Attribute, die in den entsprechenden Modellierungsdateien verwendet werden (ausgeführt von `utils/check_config_attributes.py`) - Die Übersetzungen der READMEs und der Index des Dokuments haben die gleiche Modellliste wie die Haupt-README (durchgeführt von `utils/check_copies.py`) - Die automatisch generierten Tabellen in der Dokumentation sind auf dem neuesten Stand (ausgeführt von `utils/check_table.py`) - Die Bibliothek verfügt über alle Objekte, auch wenn nicht alle optionalen Abhängigkeiten installiert sind (ausgeführt von `utils/check_dummies.py`) Sollte diese Prüfung fehlschlagen, müssen die ersten 
beiden Punkte manuell korrigiert werden, die letzten vier können automatisch für Sie korrigiert werden, indem Sie den Befehl ```bash make fix-copies ``` Zusätzliche Prüfungen betreffen PRs, die neue Modelle hinzufügen, vor allem, dass: - Alle hinzugefügten Modelle befinden sich in einer Auto-Zuordnung (durchgeführt von `utils/check_repo.py`) <!-- TODO Sylvain, add a check that makes sure the common tests are implemented.--> - Alle Modelle werden ordnungsgemäß getestet (ausgeführt von `utils/check_repo.py`) <!-- TODO Sylvain, add the following - All models are added to the main README, inside the main doc - All checkpoints used actually exist on the Hub --> ### Kopien prüfen Da die Transformers-Bibliothek in Bezug auf den Modellcode sehr eigenwillig ist und jedes Modell vollständig in einer einzigen Datei implementiert sein sollte, ohne sich auf andere Modelle zu stützen, haben wir einen Mechanismus hinzugefügt, der überprüft, ob eine Kopie des Codes einer Ebene eines bestimmten Modells mit dem Original übereinstimmt. Auf diese Weise können wir bei einer Fehlerbehebung alle anderen betroffenen Modelle sehen und entscheiden, ob wir die Änderung weitergeben oder die Kopie zerstören. <Tip> Wenn eine Datei eine vollständige Kopie einer anderen Datei ist, sollten Sie sie in der Konstante `FULL_COPIES` von `utils/check_copies.py` registrieren. </Tip> Dieser Mechanismus stützt sich auf Kommentare der Form `# Kopiert von xxx`. Das `xxx` sollte den gesamten Pfad zu der Klasse der Funktion enthalten, die darunter kopiert wird. Zum Beispiel ist `RobertaSelfOutput` eine direkte Kopie der Klasse `BertSelfOutput`. 
Sie können also [hier](https://github.com/huggingface/transformers/blob/2bd7a27a671fd1d98059124024f580f8f5c0f3b5/src/transformers/models/roberta/modeling_roberta.py#L289) sehen, dass sie einen Kommentar hat: ```py # Copied from transformers.models.bert.modeling_bert.BertSelfOutput ``` Beachten Sie, dass Sie dies nicht auf eine ganze Klasse anwenden, sondern auf die entsprechenden Methoden, von denen kopiert wird. Zum Beispiel [hier](https://github.com/huggingface/transformers/blob/2bd7a27a671fd1d98059124024f580f8f5c0f3b5/src/transformers/models/roberta/modeling_roberta.py#L598) können Sie sehen, wie `RobertaPreTrainedModel._init_weights` von der gleichen Methode in `BertPreTrainedModel` mit dem Kommentar kopiert wird: ```py # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights ``` Manchmal ist die Kopie bis auf die Namen genau gleich: zum Beispiel verwenden wir in `RobertaAttention` `RobertaSelfAttention` anstelle von `BertSelfAttention`, aber ansonsten ist der Code genau derselbe. Aus diesem Grund unterstützt `#Copied from` einfache String-Ersetzungen mit der folgenden Syntax: `Kopiert von xxx mit foo->bar`. Das bedeutet, dass der Code kopiert wird, wobei alle Instanzen von "foo" durch "bar" ersetzt werden. Sie können sehen, wie es [hier](https://github.com/huggingface/transformers/blob/2bd7a27a671fd1d98059124024f580f8f5c0f3b5/src/transformers/models/roberta/modeling_roberta.py#L304C1-L304C86) in `RobertaAttention` mit dem Kommentar verwendet wird: ```py # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta ``` Beachten Sie, dass um den Pfeil herum keine Leerzeichen stehen sollten (es sei denn, das Leerzeichen ist Teil des zu ersetzenden Musters, natürlich). Sie können mehrere Muster durch ein Komma getrennt hinzufügen. Zum Beispiel ist hier `CamemberForMaskedLM` eine direkte Kopie von `RobertaForMaskedLM` mit zwei Ersetzungen: `Roberta` zu `Camembert` und `ROBERTA` zu `CAMEMBERT`. 
Sie können [hier](https://github.com/huggingface/transformers/blob/15082a9dc6950ecae63a0d3e5060b2fc7f15050a/src/transformers/models/camembert/modeling_camembert.py#L929) sehen, wie dies mit dem Kommentar gemacht wird: ```py # Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM with Roberta->Camembert, ROBERTA->CAMEMBERT ``` Wenn die Reihenfolge eine Rolle spielt (weil eine der Ersetzungen mit einer vorherigen in Konflikt geraten könnte), werden die Ersetzungen von links nach rechts ausgeführt. <Tip> Wenn die Ersetzungen die Formatierung ändern (wenn Sie z.B. einen kurzen Namen durch einen sehr langen Namen ersetzen), wird die Kopie nach Anwendung des automatischen Formats überprüft. </Tip> Eine andere Möglichkeit, wenn es sich bei den Mustern nur um verschiedene Umschreibungen derselben Ersetzung handelt (mit einer groß- und einer kleingeschriebenen Variante), besteht darin, die Option `all-casing` hinzuzufügen. [Hier](https://github.com/huggingface/transformers/blob/15082a9dc6950ecae63a0d3e5060b2fc7f15050a/src/transformers/models/mobilebert/modeling_mobilebert.py#L1237) ist ein Beispiel in `MobileBertForSequenceClassification` mit dem Kommentar: ```py # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing ``` In diesem Fall wird der Code von `BertForSequenceClassification` kopiert, indem er ersetzt wird: - `Bert` durch `MobileBert` (zum Beispiel bei der Verwendung von `MobileBertModel` in der Init) - `bert` durch `mobilebert` (zum Beispiel bei der Definition von `self.mobilebert`) - `BERT` durch `MOBILEBERT` (in der Konstante `MOBILEBERT_INPUTS_DOCSTRING`)
transformers/docs/source/de/pr_checks.md/0
{ "file_path": "transformers/docs/source/de/pr_checks.md", "repo_id": "transformers", "token_count": 4986 }
283
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Benchmarks <Tip warning={true}> Hugging Face's Benchmarking tools are deprecated and it is advised to use external Benchmarking libraries to measure the speed and memory complexity of Transformer models. </Tip> [[open-in-colab]] Let's take a look at how 🤗 Transformers models can be benchmarked, best practices, and already available benchmarks. A notebook explaining in more detail how to benchmark 🤗 Transformers models can be found [here](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb). ## How to benchmark 🤗 Transformers models The classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] allow to flexibly benchmark 🤗 Transformers models. The benchmark classes allow us to measure the _peak memory usage_ and _required time_ for both _inference_ and _training_. <Tip> Hereby, _inference_ is defined by a single forward pass, and _training_ is defined by a single forward pass and backward pass. </Tip> The benchmark classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] expect an object of type [`PyTorchBenchmarkArguments`] and [`TensorFlowBenchmarkArguments`], respectively, for instantiation. 
[`PyTorchBenchmarkArguments`] and [`TensorFlowBenchmarkArguments`] are data classes and contain all relevant configurations for their corresponding benchmark class. In the following example, it is shown how a BERT model of type _bert-base-cased_ can be benchmarked. <frameworkcontent> <pt> ```py >>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments >>> args = PyTorchBenchmarkArguments(models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]) >>> benchmark = PyTorchBenchmark(args) ``` </pt> <tf> ```py >>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments >>> args = TensorFlowBenchmarkArguments( ... models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512] ... ) >>> benchmark = TensorFlowBenchmark(args) ``` </tf> </frameworkcontent> Here, three arguments are given to the benchmark argument data classes, namely `models`, `batch_sizes`, and `sequence_lengths`. The argument `models` is required and expects a `list` of model identifiers from the [model hub](https://huggingface.co/models) The `list` arguments `batch_sizes` and `sequence_lengths` define the size of the `input_ids` on which the model is benchmarked. There are many more parameters that can be configured via the benchmark argument data classes. For more detail on these one can either directly consult the files `src/transformers/benchmark/benchmark_args_utils.py`, `src/transformers/benchmark/benchmark_args.py` (for PyTorch) and `src/transformers/benchmark/benchmark_args_tf.py` (for Tensorflow). Alternatively, running the following shell commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow respectively. <frameworkcontent> <pt> ```bash python examples/pytorch/benchmarking/run_benchmark.py --help ``` An instantiated benchmark object can then simply be run by calling `benchmark.run()`. 
```py >>> results = benchmark.run() >>> print(results) ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time in s -------------------------------------------------------------------------------- google-bert/bert-base-uncased 8 8 0.006 google-bert/bert-base-uncased 8 32 0.006 google-bert/bert-base-uncased 8 128 0.018 google-bert/bert-base-uncased 8 512 0.088 -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory in MB -------------------------------------------------------------------------------- google-bert/bert-base-uncased 8 8 1227 google-bert/bert-base-uncased 8 32 1281 google-bert/bert-base-uncased 8 128 1307 google-bert/bert-base-uncased 8 512 1539 -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: 2.11.0 - framework: PyTorch - use_torchscript: False - framework_version: 1.4.0 - python_version: 3.6.10 - system: Linux - cpu: x86_64 - architecture: 64bit - date: 2020-06-29 - time: 08:58:43.371351 - fp16: False - use_multiprocessing: True - only_pretrain_model: False - cpu_ram_mb: 32088 - use_gpu: True - num_gpus: 1 - gpu: TITAN RTX - gpu_ram_mb: 24217 - gpu_power_watts: 280.0 - gpu_performance_state: 2 - use_tpu: False ``` </pt> <tf> ```bash python examples/tensorflow/benchmarking/run_benchmark_tf.py --help ``` An instantiated benchmark object can then simply be run by calling `benchmark.run()`. 
```py >>> results = benchmark.run() >>> print(results) >>> results = benchmark.run() >>> print(results) ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time in s -------------------------------------------------------------------------------- google-bert/bert-base-uncased 8 8 0.005 google-bert/bert-base-uncased 8 32 0.008 google-bert/bert-base-uncased 8 128 0.022 google-bert/bert-base-uncased 8 512 0.105 -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory in MB -------------------------------------------------------------------------------- google-bert/bert-base-uncased 8 8 1330 google-bert/bert-base-uncased 8 32 1330 google-bert/bert-base-uncased 8 128 1330 google-bert/bert-base-uncased 8 512 1770 -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: 2.11.0 - framework: Tensorflow - use_xla: False - framework_version: 2.2.0 - python_version: 3.6.10 - system: Linux - cpu: x86_64 - architecture: 64bit - date: 2020-06-29 - time: 09:26:35.617317 - fp16: False - use_multiprocessing: True - only_pretrain_model: False - cpu_ram_mb: 32088 - use_gpu: True - num_gpus: 1 - gpu: TITAN RTX - gpu_ram_mb: 24217 - gpu_power_watts: 280.0 - gpu_performance_state: 2 - use_tpu: False ``` </tf> </frameworkcontent> By default, the _time_ and the _required memory_ for _inference_ are benchmarked. In the example output above the first two sections show the result corresponding to _inference time_ and _inference memory_. 
In addition, all relevant information about the computing environment, _e.g._ the GPU type, the system, the library versions, etc... are printed out in the third section under _ENVIRONMENT INFORMATION_. This information can optionally be saved in a _.csv_ file when adding the argument `save_to_csv=True` to [`PyTorchBenchmarkArguments`] and [`TensorFlowBenchmarkArguments`] respectively. In this case, every section is saved in a separate _.csv_ file. The path to each _.csv_ file can optionally be defined via the argument data classes. Instead of benchmarking pre-trained models via their model identifier, _e.g._ `google-bert/bert-base-uncased`, the user can alternatively benchmark an arbitrary configuration of any available model class. In this case, a `list` of configurations must be inserted with the benchmark args as follows. <frameworkcontent> <pt> ```py >>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig >>> args = PyTorchBenchmarkArguments( ... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512] ... 
) >>> config_base = BertConfig() >>> config_384_hid = BertConfig(hidden_size=384) >>> config_6_lay = BertConfig(num_hidden_layers=6) >>> benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay]) >>> benchmark.run() ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time in s -------------------------------------------------------------------------------- bert-base 8 8 0.006 bert-base 8 32 0.006 bert-base 8 128 0.018 bert-base 8 512 0.088 bert-384-hid 8 8 0.006 bert-384-hid 8 32 0.006 bert-384-hid 8 128 0.011 bert-384-hid 8 512 0.054 bert-6-lay 8 8 0.003 bert-6-lay 8 32 0.004 bert-6-lay 8 128 0.009 bert-6-lay 8 512 0.044 -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory in MB -------------------------------------------------------------------------------- bert-base 8 8 1277 bert-base 8 32 1281 bert-base 8 128 1307 bert-base 8 512 1539 bert-384-hid 8 8 1005 bert-384-hid 8 32 1027 bert-384-hid 8 128 1035 bert-384-hid 8 512 1255 bert-6-lay 8 8 1097 bert-6-lay 8 32 1101 bert-6-lay 8 128 1127 bert-6-lay 8 512 1359 -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: 2.11.0 - framework: PyTorch - use_torchscript: False - framework_version: 1.4.0 - python_version: 3.6.10 - system: Linux - cpu: x86_64 - architecture: 64bit - date: 2020-06-29 - time: 09:35:25.143267 - fp16: False - use_multiprocessing: True - only_pretrain_model: False - cpu_ram_mb: 32088 - use_gpu: True - num_gpus: 1 - gpu: TITAN RTX - gpu_ram_mb: 24217 - gpu_power_watts: 280.0 - gpu_performance_state: 2 - 
use_tpu: False ``` </pt> <tf> ```py >>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig >>> args = TensorFlowBenchmarkArguments( ... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512] ... ) >>> config_base = BertConfig() >>> config_384_hid = BertConfig(hidden_size=384) >>> config_6_lay = BertConfig(num_hidden_layers=6) >>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay]) >>> benchmark.run() ==================== INFERENCE - SPEED - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Time in s -------------------------------------------------------------------------------- bert-base 8 8 0.005 bert-base 8 32 0.008 bert-base 8 128 0.022 bert-base 8 512 0.106 bert-384-hid 8 8 0.005 bert-384-hid 8 32 0.007 bert-384-hid 8 128 0.018 bert-384-hid 8 512 0.064 bert-6-lay 8 8 0.002 bert-6-lay 8 32 0.003 bert-6-lay 8 128 0.0011 bert-6-lay 8 512 0.074 -------------------------------------------------------------------------------- ==================== INFERENCE - MEMORY - RESULT ==================== -------------------------------------------------------------------------------- Model Name Batch Size Seq Length Memory in MB -------------------------------------------------------------------------------- bert-base 8 8 1330 bert-base 8 32 1330 bert-base 8 128 1330 bert-base 8 512 1770 bert-384-hid 8 8 1330 bert-384-hid 8 32 1330 bert-384-hid 8 128 1330 bert-384-hid 8 512 1540 bert-6-lay 8 8 1330 bert-6-lay 8 32 1330 bert-6-lay 8 128 1330 bert-6-lay 8 512 1540 -------------------------------------------------------------------------------- ==================== ENVIRONMENT INFORMATION ==================== - transformers_version: 2.11.0 - framework: Tensorflow - use_xla: False - framework_version: 2.2.0 - python_version: 3.6.10 - system: Linux - cpu: x86_64 - 
architecture: 64bit - date: 2020-06-29 - time: 09:38:15.487125 - fp16: False - use_multiprocessing: True - only_pretrain_model: False - cpu_ram_mb: 32088 - use_gpu: True - num_gpus: 1 - gpu: TITAN RTX - gpu_ram_mb: 24217 - gpu_power_watts: 280.0 - gpu_performance_state: 2 - use_tpu: False ``` </tf> </frameworkcontent> Again, _inference time_ and _required memory_ for _inference_ are measured, but this time for customized configurations of the `BertModel` class. This feature can especially be helpful when deciding for which configuration the model should be trained. ## Benchmark best practices This section lists a couple of best practices one should be aware of when benchmarking a model. - Currently, only single device benchmarking is supported. When benchmarking on GPU, it is recommended that the user specifies on which device the code should be run by setting the `CUDA_VISIBLE_DEVICES` environment variable in the shell, _e.g._ `export CUDA_VISIBLE_DEVICES=0` before running the code. - The option `no_multi_processing` should only be set to `True` for testing and debugging. To ensure accurate memory measurement it is recommended to run each memory benchmark in a separate process by making sure `no_multi_processing` is set to `False`. - One should always state the environment information when sharing the results of a model benchmark. Results can vary heavily between different GPU devices, library versions, etc., so that benchmark results on their own are not very useful for the community. ## Sharing your benchmark Previously all available core models (10 at the time) have been benchmarked for _inference time_, across many different settings: using PyTorch, with and without TorchScript, using TensorFlow, with and without XLA. All of those tests were done across CPUs (except for TensorFlow XLA) and GPUs. 
The approach is detailed in the [following blogpost](https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2) and the results are available [here](https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing). With the new _benchmark_ tools, it is easier than ever to share your benchmark results with the community - [PyTorch Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md). - [TensorFlow Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md).
transformers/docs/source/en/benchmarks.md/0
{ "file_path": "transformers/docs/source/en/benchmarks.md", "repo_id": "transformers", "token_count": 7208 }
284
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Hyperparameter Search using Trainer API 🤗 Transformers provides a [`Trainer`] class optimized for training 🤗 Transformers models, making it easier to start training without manually writing your own training loop. The [`Trainer`] provides API for hyperparameter search. This doc shows how to enable it in an example. ## Hyperparameter Search backend [`Trainer`] supports four hyperparameter search backends currently: [optuna](https://optuna.org/), [sigopt](https://sigopt.com/), [raytune](https://docs.ray.io/en/latest/tune/index.html) and [wandb](https://wandb.ai/site/sweeps). You should install them before using them as the hyperparameter search backend. ```bash pip install optuna/sigopt/wandb/ray[tune] ``` ## How to enable Hyperparameter search in example Define the hyperparameter search space; different backends need different formats. For sigopt, see sigopt [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter), it's like the following: ```py >>> def sigopt_hp_space(trial): ... return [ ... {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"}, ... { ... "categorical_values": ["16", "32", "64", "128"], ... "name": "per_device_train_batch_size", ... "type": "categorical", ... }, ... 
] ``` For optuna, see optuna [object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py), it's like following: ```py >>> def optuna_hp_space(trial): ... return { ... "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), ... "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]), ... } ``` Optuna provides multi-objective HPO. You can pass `direction` in `hyperparameter_search` and define your own compute_objective to return multiple objective values. The Pareto Front (`List[BestRun]`) will be returned in hyperparameter_search, you should refer to the test case `TrainerHyperParameterMultiObjectOptunaIntegrationTest` in [test_trainer](https://github.com/huggingface/transformers/blob/main/tests/trainer/test_trainer.py). It's like following ```py >>> best_trials = trainer.hyperparameter_search( ... direction=["minimize", "maximize"], ... backend="optuna", ... hp_space=optuna_hp_space, ... n_trials=20, ... compute_objective=compute_objective, ... ) ``` For raytune, see raytune [object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html), it's like following: ```py >>> def ray_hp_space(trial): ... return { ... "learning_rate": tune.loguniform(1e-6, 1e-4), ... "per_device_train_batch_size": tune.choice([16, 32, 64, 128]), ... } ``` For wandb, see wandb [object_parameter](https://docs.wandb.ai/guides/sweeps/configuration), it's like following: ```py >>> def wandb_hp_space(trial): ... return { ... "method": "random", ... "metric": {"name": "objective", "goal": "minimize"}, ... "parameters": { ... "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, ... "per_device_train_batch_size": {"values": [16, 32, 64, 128]}, ... }, ... } ``` Define a `model_init` function and pass it to the [`Trainer`], as an example: ```py >>> def model_init(trial): ... 
return AutoModelForSequenceClassification.from_pretrained( ... model_args.model_name_or_path, ... from_tf=bool(".ckpt" in model_args.model_name_or_path), ... config=config, ... cache_dir=model_args.cache_dir, ... revision=model_args.model_revision, ... token=True if model_args.use_auth_token else None, ... ) ``` Create a [`Trainer`] with your `model_init` function, training arguments, training and test datasets, and evaluation function: ```py >>> trainer = Trainer( ... model=None, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... tokenizer=tokenizer, ... model_init=model_init, ... data_collator=data_collator, ... ) ``` Call hyperparameter search, get the best trial parameters, backend could be `"optuna"`/`"sigopt"`/`"wandb"`/`"ray"`. The direction can be `"minimize"` or `"maximize"`, which indicates whether to optimize greater or lower objective. You could define your own compute_objective function; if not defined, the default compute_objective will be called, and the sum of eval metric like f1 is returned as objective value. ```py >>> best_trial = trainer.hyperparameter_search( ... direction="maximize", ... backend="optuna", ... hp_space=optuna_hp_space, ... n_trials=20, ... compute_objective=compute_objective, ... ) ``` ## Hyperparameter search For DDP finetune Currently, Hyperparameter search for DDP is enabled for optuna and sigopt. Only the rank-zero process will generate the search trial and pass the argument to other ranks.
transformers/docs/source/en/hpo_train.md/0
{ "file_path": "transformers/docs/source/en/hpo_train.md", "repo_id": "transformers", "token_count": 2076 }
285
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Agents & Tools <Tip warning={true}> Transformers Agents is an experimental API which is subject to change at any time. Results returned by the agents can vary as the APIs or underlying models are prone to change. </Tip> To learn more about agents and tools make sure to read the [introductory guide](../transformers_agents). This page contains the API docs for the underlying classes. ## Agents We provide two types of agents, based on the main [`Agent`] class: - [`CodeAgent`] acts in one shot, generating code to solve the task, then executes it at once. - [`ReactAgent`] acts step by step, each step consisting of one thought, then one tool call and execution. It has two classes: - [`ReactJsonAgent`] writes its tool calls in JSON. - [`ReactCodeAgent`] writes its tool calls in Python code. 
### Agent [[autodoc]] Agent ### CodeAgent [[autodoc]] CodeAgent ### React agents [[autodoc]] ReactAgent [[autodoc]] ReactJsonAgent [[autodoc]] ReactCodeAgent ## Tools ### load_tool [[autodoc]] load_tool ### Tool [[autodoc]] Tool ### Toolbox [[autodoc]] Toolbox ### PipelineTool [[autodoc]] PipelineTool ### launch_gradio_demo [[autodoc]] launch_gradio_demo ### stream_to_gradio [[autodoc]] stream_to_gradio ### ToolCollection [[autodoc]] ToolCollection ## Engines You're free to create and use your own engines to be usable by the Agents framework. These engines have the following specification: 1. Follow the [messages format](../chat_templating.md) for its input (`List[Dict[str, str]]`) and return a string. 2. Stop generating outputs *before* the sequences passed in the argument `stop_sequences` ### TransformersEngine For convenience, we have added a `TransformersEngine` that implements the points above, taking a pre-initialized `Pipeline` as input. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, TransformersEngine >>> model_name = "HuggingFaceTB/SmolLM-135M-Instruct" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) >>> model = AutoModelForCausalLM.from_pretrained(model_name) >>> pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) >>> engine = TransformersEngine(pipe) >>> engine([{"role": "user", "content": "Ok!"}], stop_sequences=["great"]) "What a " ``` [[autodoc]] TransformersEngine ### HfApiEngine The `HfApiEngine` is an engine that wraps an [HF Inference API](https://huggingface.co/docs/api-inference/index) client for the execution of the LLM. ```python >>> from transformers import HfApiEngine >>> messages = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "No need to help, take it easy."}, ... 
] >>> HfApiEngine()(messages, stop_sequences=["conversation"]) "That's very kind of you to say! It's always nice to have a relaxed " ``` [[autodoc]] HfApiEngine ## Agent Types Agents can handle any type of object in-between tools; tools, being completely multimodal, can accept and return text, image, audio, video, among other types. In order to increase compatibility between tools, as well as to correctly render these returns in ipython (jupyter, colab, ipython notebooks, ...), we implement wrapper classes around these types. The wrapped objects should continue behaving as initially; a text object should still behave as a string, an image object should still behave as a `PIL.Image`. These types have three specific purposes: - Calling `to_raw` on the type should return the underlying object - Calling `to_string` on the type should return the object as a string: that can be the string in case of an `AgentText` but will be the path of the serialized version of the object in other instances - Displaying it in an ipython kernel should display the object correctly ### AgentText [[autodoc]] transformers.agents.agent_types.AgentText ### AgentImage [[autodoc]] transformers.agents.agent_types.AgentImage ### AgentAudio [[autodoc]] transformers.agents.agent_types.AgentAudio
transformers/docs/source/en/main_classes/agent.md/0
{ "file_path": "transformers/docs/source/en/main_classes/agent.md", "repo_id": "transformers", "token_count": 1378 }
286
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quantization Quantization techniques reduce memory and computational costs by representing weights and activations with lower-precision data types like 8-bit integers (int8). This enables loading larger models you normally wouldn't be able to fit into memory, and speeding up inference. Transformers supports the AWQ and GPTQ quantization algorithms and it supports 8-bit and 4-bit quantization with bitsandbytes. Quantization techniques that aren't supported in Transformers can be added with the [`HfQuantizer`] class. <Tip> Learn how to quantize models in the [Quantization](../quantization) guide. </Tip> ## QuantoConfig [[autodoc]] QuantoConfig ## AqlmConfig [[autodoc]] AqlmConfig ## AwqConfig [[autodoc]] AwqConfig ## EetqConfig [[autodoc]] EetqConfig ## GPTQConfig [[autodoc]] GPTQConfig ## BitsAndBytesConfig [[autodoc]] BitsAndBytesConfig ## HfQuantizer [[autodoc]] quantizers.base.HfQuantizer ## HqqConfig [[autodoc]] HqqConfig ## FbgemmFp8Config [[autodoc]] FbgemmFp8Config ## TorchAoConfig [[autodoc]] TorchAoConfig
transformers/docs/source/en/main_classes/quantization.md/0
{ "file_path": "transformers/docs/source/en/main_classes/quantization.md", "repo_id": "transformers", "token_count": 514 }
287
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CamemBERT ## Overview The CamemBERT model was proposed in [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by [Louis Martin](https://huggingface.co/louismartin), [Benjamin Muller](https://huggingface.co/benjamin-mlr), [Pedro Javier Ortiz Suárez](https://huggingface.co/pjox), Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, [Djamé Seddah](https://huggingface.co/Djame), and [Benoît Sagot](https://huggingface.co/sagot). It is based on Facebook's RoBERTa model released in 2019. It is a model trained on 138GB of French text. The abstract from the paper is the following: *Pretrained language models are now ubiquitous in Natural Language Processing. Despite their success, most available models have either been trained on English data or on the concatenation of data in multiple languages. This makes practical use of such models --in all languages except English-- very limited. Aiming to address this issue for French, we release CamemBERT, a French version of the Bi-directional Encoders for Transformers (BERT). 
We measure the performance of CamemBERT compared to multilingual models in multiple downstream tasks, namely part-of-speech tagging, dependency parsing, named-entity recognition, and natural language inference. CamemBERT improves the state of the art for most of the tasks considered. We release the pretrained model for CamemBERT hoping to foster research and downstream applications for French NLP.* This model was contributed by [the ALMAnaCH team (Inria)](https://huggingface.co/almanach). The original code can be found [here](https://camembert-model.fr/). <Tip> This implementation is the same as RoBERTa. Refer to the [documentation of RoBERTa](roberta) for usage examples as well as the information relative to the inputs and outputs. </Tip> ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## CamembertConfig [[autodoc]] CamembertConfig ## CamembertTokenizer [[autodoc]] CamembertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## CamembertTokenizerFast [[autodoc]] CamembertTokenizerFast <frameworkcontent> <pt> ## CamembertModel [[autodoc]] CamembertModel ## CamembertForCausalLM [[autodoc]] CamembertForCausalLM ## CamembertForMaskedLM [[autodoc]] CamembertForMaskedLM ## CamembertForSequenceClassification [[autodoc]] CamembertForSequenceClassification ## CamembertForMultipleChoice [[autodoc]] CamembertForMultipleChoice ## CamembertForTokenClassification [[autodoc]] CamembertForTokenClassification ## CamembertForQuestionAnswering [[autodoc]] CamembertForQuestionAnswering </pt> <tf> ## TFCamembertModel [[autodoc]] TFCamembertModel ## 
TFCamembertForCausalLM [[autodoc]] TFCamembertForCausalLM ## TFCamembertForMaskedLM [[autodoc]] TFCamembertForMaskedLM ## TFCamembertForSequenceClassification [[autodoc]] TFCamembertForSequenceClassification ## TFCamembertForMultipleChoice [[autodoc]] TFCamembertForMultipleChoice ## TFCamembertForTokenClassification [[autodoc]] TFCamembertForTokenClassification ## TFCamembertForQuestionAnswering [[autodoc]] TFCamembertForQuestionAnswering </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/camembert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/camembert.md", "repo_id": "transformers", "token_count": 1309 }
288
<!--Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CPMAnt ## Overview CPM-Ant is an open-source Chinese pre-trained language model (PLM) with 10B parameters. It is also the first milestone of the live training process of CPM-Live. The training process is cost-effective and environment-friendly. CPM-Ant also achieves promising results with delta tuning on the CUGE benchmark. Besides the full model, we also provide various compressed versions to meet the requirements of different hardware configurations. [See more](https://github.com/OpenBMB/CPM-Live/tree/cpm-ant/cpm-live) This model was contributed by [OpenBMB](https://huggingface.co/openbmb). The original code can be found [here](https://github.com/OpenBMB/CPM-Live/tree/cpm-ant/cpm-live). ## Resources - A tutorial on [CPM-Live](https://github.com/OpenBMB/CPM-Live/tree/cpm-ant/cpm-live). ## CpmAntConfig [[autodoc]] CpmAntConfig - all ## CpmAntTokenizer [[autodoc]] CpmAntTokenizer - all ## CpmAntModel [[autodoc]] CpmAntModel - all ## CpmAntForCausalLM [[autodoc]] CpmAntForCausalLM - all
transformers/docs/source/en/model_doc/cpmant.md/0
{ "file_path": "transformers/docs/source/en/model_doc/cpmant.md", "repo_id": "transformers", "token_count": 534 }
289
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DialoGPT ## Overview DialoGPT was proposed in [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. It's a GPT2 Model trained on 147M conversation-like exchanges extracted from Reddit. The abstract from the paper is the following: *We present a large, tunable neural conversational response generation model, DialoGPT (dialogue generative pre-trained transformer). Trained on 147M conversation-like exchanges extracted from Reddit comment chains over a period spanning from 2005 through 2017, DialoGPT extends the Hugging Face PyTorch transformer to attain a performance close to human both in terms of automatic and human evaluation in single-turn dialogue settings. We show that conversational systems that leverage DialoGPT generate more relevant, contentful and context-consistent responses than strong baseline systems. 
The pre-trained model and training pipeline are publicly released to facilitate research into neural response generation and the development of more intelligent open-domain dialogue systems.* The original code can be found [here](https://github.com/microsoft/DialoGPT). ## Usage tips - DialoGPT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - DialoGPT was trained with a causal language modeling (CLM) objective on conversational data and is therefore powerful at response generation in open-domain dialogue systems. - DialoGPT enables the user to create a chat bot in just 10 lines of code as shown on [DialoGPT's model card](https://huggingface.co/microsoft/DialoGPT-medium). Training: In order to train or fine-tune DialoGPT, one can use causal language modeling training. To cite the official paper: *We follow the OpenAI GPT-2 to model a multiturn dialogue session as a long text and frame the generation task as language modeling. We first concatenate all dialog turns within a dialogue session into a long text x_1,..., x_N (N is the sequence length), ended by the end-of-text token.* For more information please confer to the original paper. <Tip> DialoGPT's architecture is based on the GPT2 model, refer to [GPT2's documentation page](gpt2) for API reference and examples. </Tip>
transformers/docs/source/en/model_doc/dialogpt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/dialogpt.md", "repo_id": "transformers", "token_count": 789 }
290
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Falcon ## Overview Falcon is a class of causal decoder-only models built by [TII](https://www.tii.ae/). The largest Falcon checkpoints have been trained on >=1T tokens of text, with a particular emphasis on the [RefinedWeb](https://arxiv.org/abs/2306.01116) corpus. They are made available under the Apache 2.0 license. Falcon's architecture is modern and optimized for inference, with multi-query attention and support for efficient attention variants like `FlashAttention`. Both 'base' models trained only as causal language models as well as 'instruct' models that have received further fine-tuning are available. Falcon models are (as of 2023) some of the largest and most powerful open-source language models, and consistently rank highly in the [OpenLLM leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). ## Converting custom checkpoints <Tip> Falcon models were initially added to the Hugging Face Hub as custom code checkpoints. However, Falcon is now fully supported in the Transformers library. 
If you fine-tuned a model from a custom code checkpoint, we recommend converting your checkpoint to the new in-library format, as this should give significant improvements to stability and performance, especially for generation, as well as removing the need to use `trust_remote_code=True`! </Tip> You can convert custom code checkpoints to full Transformers checkpoints using the `convert_custom_code_checkpoint.py` script located in the [Falcon model directory](https://github.com/huggingface/transformers/tree/main/src/transformers/models/falcon) of the Transformers library. To use this script, simply call it with `python convert_custom_code_checkpoint.py --checkpoint_dir my_model`. This will convert your checkpoint in-place, and you can immediately load it from the directory afterwards with e.g. `from_pretrained()`. If your model hasn't been uploaded to the Hub, we recommend making a backup before attempting the conversion, just in case! ## FalconConfig [[autodoc]] FalconConfig - all ## FalconModel [[autodoc]] FalconModel - forward ## FalconForCausalLM [[autodoc]] FalconForCausalLM - forward ## FalconForSequenceClassification [[autodoc]] FalconForSequenceClassification - forward ## FalconForTokenClassification [[autodoc]] FalconForTokenClassification - forward ## FalconForQuestionAnswering [[autodoc]] FalconForQuestionAnswering - forward
transformers/docs/source/en/model_doc/falcon.md/0
{ "file_path": "transformers/docs/source/en/model_doc/falcon.md", "repo_id": "transformers", "token_count": 837 }
291
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPT-Sw3 ## Overview The GPT-Sw3 model was first proposed in [Lessons Learned from GPT-SW3: Building the First Large-Scale Generative Language Model for Swedish](http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.376.pdf) by Ariel Ekgren, Amaru Cuba Gyllensten, Evangelia Gogoulou, Alice Heiman, Severine Verlinden, Joey Öhman, Fredrik Carlsson, Magnus Sahlgren. Since that first paper the authors have extended their work and trained new models on their new 1.2TB corpora named The Nordic Pile. GPT-Sw3 is a collection of large decoder-only pretrained transformer language models that were developed by AI Sweden in collaboration with RISE and the WASP WARA for Media and Language. GPT-Sw3 has been trained on a dataset containing 320B tokens in Swedish, Norwegian, Danish, Icelandic, English, and programming code. The model was pretrained using a causal language modeling (CLM) objective utilizing the NeMo Megatron GPT implementation. This model was contributed by [AI Sweden Models](https://huggingface.co/AI-Sweden-Models). 
## Usage example ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-356m") >>> model = AutoModelForCausalLM.from_pretrained("AI-Sweden-Models/gpt-sw3-356m") >>> input_ids = tokenizer("Träd är fina för att", return_tensors="pt")["input_ids"] >>> generated_token_ids = model.generate(inputs=input_ids, max_new_tokens=10, do_sample=True)[0] >>> print(tokenizer.decode(generated_token_ids)) Träd är fina för att de är färgstarka. Men ibland är det fint ``` ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Causal language modeling task guide](../tasks/language_modeling) <Tip> The implementation uses the `GPT2Model` coupled with our `GPTSw3Tokenizer`. Refer to [GPT2Model documentation](gpt2) for API reference and examples. Note that sentencepiece is required to use our tokenizer and can be installed with `pip install transformers[sentencepiece]` or `pip install sentencepiece` </Tip> ## GPTSw3Tokenizer [[autodoc]] GPTSw3Tokenizer - save_vocabulary
transformers/docs/source/en/model_doc/gpt-sw3.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gpt-sw3.md", "repo_id": "transformers", "token_count": 879 }
292
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # IDEFICS ## Overview The IDEFICS model was proposed in [OBELICS: An Open Web-Scale Filtered Dataset of Interleaved Image-Text Documents ](https://huggingface.co/papers/2306.16527 ) by Hugo Laurençon, Lucile Saulnier, Léo Tronchon, Stas Bekman, Amanpreet Singh, Anton Lozhkov, Thomas Wang, Siddharth Karamcheti, Alexander M. Rush, Douwe Kiela, Matthieu Cord, Victor Sanh The abstract from the paper is the following: *Large multimodal models trained on natural documents, which interleave images and text, outperform models trained on image-text pairs on various multimodal benchmarks that require reasoning over one or multiple images to generate a text. However, the datasets used to train these models have not been released, and the collection process has not been fully specified. We introduce the OBELICS dataset, an open web-scale filtered dataset of interleaved image-text documents comprising 141 million web pages extracted from Common Crawl, 353 million associated images, and 115 billion text tokens. We describe the dataset creation process, present comprehensive filtering rules, and provide an analysis of the dataset's content. 
To show the viability of OBELICS, we train an 80 billion parameters vision and language model on the dataset and obtain competitive performance on various multimodal benchmarks. We release the code to reproduce the dataset along with the dataset itself.* This model was contributed by [HuggingFaceM4](https://huggingface.co/HuggingFaceM4). The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>). (TODO: don't have a public link yet). <Tip warning={true}> IDEFICS modeling code in Transformers is for finetuning and inferencing the pre-trained IDEFICS models. To train a new IDEFICS model from scratch use the m4 codebase (a link will be provided once it's made public) </Tip> ## IdeficsConfig [[autodoc]] IdeficsConfig ## IdeficsModel [[autodoc]] IdeficsModel - forward ## IdeficsForVisionText2Text [[autodoc]] IdeficsForVisionText2Text - forward ## TFIdeficsModel [[autodoc]] TFIdeficsModel - call ## TFIdeficsForVisionText2Text [[autodoc]] TFIdeficsForVisionText2Text - call ## IdeficsImageProcessor [[autodoc]] IdeficsImageProcessor - preprocess ## IdeficsProcessor [[autodoc]] IdeficsProcessor - __call__
transformers/docs/source/en/model_doc/idefics.md/0
{ "file_path": "transformers/docs/source/en/model_doc/idefics.md", "repo_id": "transformers", "token_count": 836 }
293
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LiLT ## Overview The LiLT model was proposed in [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding. LiLT allows to combine any pre-trained RoBERTa text encoder with a lightweight Layout Transformer, to enable [LayoutLM](layoutlm)-like document understanding for many languages. The abstract from the paper is the following: *Structured document understanding has attracted considerable attention and made significant progress recently, owing to its crucial role in intelligent document processing. However, most existing related models can only deal with the document data of specific language(s) (typically English) included in the pre-training collection, which is extremely limited. To address this issue, we propose a simple yet effective Language-independent Layout Transformer (LiLT) for structured document understanding. LiLT can be pre-trained on the structured documents of a single language and then directly fine-tuned on other languages with the corresponding off-the-shelf monolingual/multilingual pre-trained textual models. 
Experimental results on eight languages have shown that LiLT can achieve competitive or even superior performance on diverse widely-used downstream benchmarks, which enables language-independent benefit from the pre-training of document layout structure.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/lilt_architecture.jpg" alt="drawing" width="600"/> <small> LiLT architecture. Taken from the <a href="https://arxiv.org/abs/2202.13669">original paper</a>. </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/jpwang/lilt). ## Usage tips - To combine the Language-Independent Layout Transformer with a new RoBERTa checkpoint from the [hub](https://huggingface.co/models?search=roberta), refer to [this guide](https://github.com/jpWang/LiLT#or-generate-your-own-checkpoint-optional). The script will result in `config.json` and `pytorch_model.bin` files being stored locally. After doing this, one can do the following (assuming you're logged in with your HuggingFace account): ```python from transformers import LiltModel model = LiltModel.from_pretrained("path_to_your_files") model.push_to_hub("name_of_repo_on_the_hub") ``` - When preparing data for the model, make sure to use the token vocabulary that corresponds to the RoBERTa checkpoint you combined with the Layout Transformer. - As [lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) uses the same vocabulary as [LayoutLMv3](layoutlmv3), one can use [`LayoutLMv3TokenizerFast`] to prepare data for the model. The same is true for [lilt-infoxlm-base](https://huggingface.co/SCUT-DLVCLab/lilt-infoxlm-base): one can use [`LayoutXLMTokenizerFast`] for that model. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LiLT. 
- Demo notebooks for LiLT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LiLT). **Documentation resources** - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## LiltConfig [[autodoc]] LiltConfig ## LiltModel [[autodoc]] LiltModel - forward ## LiltForSequenceClassification [[autodoc]] LiltForSequenceClassification - forward ## LiltForTokenClassification [[autodoc]] LiltForTokenClassification - forward ## LiltForQuestionAnswering [[autodoc]] LiltForQuestionAnswering - forward
transformers/docs/source/en/model_doc/lilt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/lilt.md", "repo_id": "transformers", "token_count": 1291 }
294
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MarkupLM ## Overview The MarkupLM model was proposed in [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. MarkupLM is BERT, but applied to HTML pages instead of raw text documents. The model incorporates additional embedding layers to improve performance, similar to [LayoutLM](layoutlm). The model can be used for tasks like question answering on web pages or information extraction from web pages. It obtains state-of-the-art results on 2 important benchmarks: - [WebSRC](https://x-lance.github.io/WebSRC/), a dataset for Web-Based Structural Reading Comprehension (a bit like SQuAD but for web pages) - [SWDE](https://www.researchgate.net/publication/221299838_From_one_tree_to_a_forest_a_unified_solution_for_structured_web_data_extraction), a dataset for information extraction from web pages (basically named-entity recognition on web pages) The abstract from the paper is the following: *Multimodal pre-training with text, layout, and image has made significant progress for Visually-rich Document Understanding (VrDU), especially the fixed-layout documents such as scanned document images. 
While, there are still a large number of digital documents where the layout information is not fixed and needs to be interactively and dynamically rendered for visualization, making existing layout-based pre-training approaches not easy to apply. In this paper, we propose MarkupLM for document understanding tasks with markup languages as the backbone such as HTML/XML-based documents, where text and markup information is jointly pre-trained. Experiment results show that the pre-trained MarkupLM significantly outperforms the existing strong baseline models on several document understanding tasks. The pre-trained model and code will be publicly available.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/markuplm). ## Usage tips - In addition to `input_ids`, [`~MarkupLMModel.forward`] expects 2 additional inputs, namely `xpath_tags_seq` and `xpath_subs_seq`. These are the XPATH tags and subscripts respectively for each token in the input sequence. - One can use [`MarkupLMProcessor`] to prepare all data for the model. Refer to the [usage guide](#usage-markuplmprocessor) for more info. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/markuplm_architecture.jpg" alt="drawing" width="600"/> <small> MarkupLM architecture. Taken from the <a href="https://arxiv.org/abs/2110.08518">original paper.</a> </small> ## Usage: MarkupLMProcessor The easiest way to prepare data for the model is to use [`MarkupLMProcessor`], which internally combines a feature extractor ([`MarkupLMFeatureExtractor`]) and a tokenizer ([`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]). The feature extractor is used to extract all nodes and xpaths from the HTML strings, which are then provided to the tokenizer, which turns them into the token-level inputs of the model (`input_ids` etc.). 
Note that you can still use the feature extractor and tokenizer separately, if you only want to handle one of the two tasks. ```python from transformers import MarkupLMFeatureExtractor, MarkupLMTokenizerFast, MarkupLMProcessor feature_extractor = MarkupLMFeatureExtractor() tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base") processor = MarkupLMProcessor(feature_extractor, tokenizer) ``` In short, one can provide HTML strings (and possibly additional data) to [`MarkupLMProcessor`], and it will create the inputs expected by the model. Internally, the processor first uses [`MarkupLMFeatureExtractor`] to get a list of nodes and corresponding xpaths. The nodes and xpaths are then provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which converts them to token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_subs_seq`, `xpath_tags_seq`. Optionally, one can provide node labels to the processor, which are turned into token-level `labels`. [`MarkupLMFeatureExtractor`] uses [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/), a Python library for pulling data out of HTML and XML files, under the hood. Note that you can still use your own parsing solution of choice, and provide the nodes and xpaths yourself to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs). **Use case 1: web page classification (training, inference) + token classification (inference), parse_html = True** This is the simplest case, in which the processor will use the feature extractor to get all nodes and xpaths from the HTML. ```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> html_string = """ ... <!DOCTYPE html> ... <html> ... 
<head> ... <title>Hello world</title> ... </head> ... <body> ... <h1>Welcome</h1> ... <p>Here is my website.</p> ... </body> ... </html>""" >>> # note that you can also add provide all tokenizer parameters here such as padding, truncation >>> encoding = processor(html_string, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` **Use case 2: web page classification (training, inference) + token classification (inference), parse_html=False** In case one already has obtained all nodes and xpaths, one doesn't need the feature extractor. In that case, one should provide the nodes and corresponding xpaths themselves to the processor, and make sure to set `parse_html` to `False`. ```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> processor.parse_html = False >>> nodes = ["hello", "world", "how", "are"] >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] >>> encoding = processor(nodes=nodes, xpaths=xpaths, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` **Use case 3: token classification (training), parse_html=False** For token classification tasks (such as [SWDE](https://paperswithcode.com/dataset/swde)), one can also provide the corresponding node labels in order to train a model. The processor will then convert these into token-level `labels`. By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. In case you want all wordpieces of a word to be labeled, you can initialize the tokenizer with `only_label_first_subword` set to `False`. 
```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> processor.parse_html = False >>> nodes = ["hello", "world", "how", "are"] >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] >>> node_labels = [1, 2, 2, 1] >>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq', 'labels']) ``` **Use case 4: web page question answering (inference), parse_html=True** For question answering tasks on web pages, you can provide a question to the processor. By default, the processor will use the feature extractor to get all nodes and xpaths, and create [CLS] question tokens [SEP] word tokens [SEP]. ```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> html_string = """ ... <!DOCTYPE html> ... <html> ... <head> ... <title>Hello world</title> ... </head> ... <body> ... <h1>Welcome</h1> ... <p>My name is Niels.</p> ... </body> ... </html>""" >>> question = "What's his name?" >>> encoding = processor(html_string, questions=question, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` **Use case 5: web page question answering (inference), parse_html=False** For question answering tasks (such as WebSRC), you can provide a question to the processor. If you have extracted all nodes and xpaths yourself, you can provide them directly to the processor. Make sure to set `parse_html` to `False`. 
```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> processor.parse_html = False >>> nodes = ["hello", "world", "how", "are"] >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] >>> question = "What's his name?" >>> encoding = processor(nodes=nodes, xpaths=xpaths, questions=question, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` ## Resources - [Demo notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/MarkupLM) - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) ## MarkupLMConfig [[autodoc]] MarkupLMConfig - all ## MarkupLMFeatureExtractor [[autodoc]] MarkupLMFeatureExtractor - __call__ ## MarkupLMTokenizer [[autodoc]] MarkupLMTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## MarkupLMTokenizerFast [[autodoc]] MarkupLMTokenizerFast - all ## MarkupLMProcessor [[autodoc]] MarkupLMProcessor - __call__ ## MarkupLMModel [[autodoc]] MarkupLMModel - forward ## MarkupLMForSequenceClassification [[autodoc]] MarkupLMForSequenceClassification - forward ## MarkupLMForTokenClassification [[autodoc]] MarkupLMForTokenClassification - forward ## MarkupLMForQuestionAnswering [[autodoc]] MarkupLMForQuestionAnswering - forward
transformers/docs/source/en/model_doc/markuplm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/markuplm.md", "repo_id": "transformers", "token_count": 3443 }
295
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MobileNet V2 ## Overview The MobileNet model was proposed in [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen. The abstract from the paper is the following: *In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3 which we call Mobile DeepLabv3.* *The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers opposite to traditional residual models which use expanded representations in the input. MobileNetV2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. 
We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on Imagenet classification, COCO object detection, VOC image segmentation. We evaluate the trade-offs between accuracy, and number of operations measured by multiply-adds (MAdd), as well as the number of parameters.* This model was contributed by [matthijs](https://huggingface.co/Matthijs). The original code and weights can be found [here for the main model](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet) and [here for DeepLabV3+](https://github.com/tensorflow/models/tree/master/research/deeplab). ## Usage tips - The checkpoints are named **mobilenet\_v2\_*depth*\_*size***, for example **mobilenet\_v2\_1.0\_224**, where **1.0** is the depth multiplier (sometimes also referred to as "alpha" or the width multiplier) and **224** is the resolution of the input images the model was trained on. - Even though the checkpoint is trained on images of specific size, the model will work on images of any size. The smallest supported image size is 32x32. - One can use [`MobileNetV2ImageProcessor`] to prepare images for the model. - The available image classification checkpoints are pre-trained on [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). However, the model predicts 1001 classes: the 1000 classes from ImageNet plus an extra “background” class (index 0). - The segmentation model uses a [DeepLabV3+](https://arxiv.org/abs/1802.02611) head. The available semantic segmentation checkpoints are pre-trained on [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/). 
- The original TensorFlow checkpoints use different padding rules than PyTorch, requiring the model to determine the padding amount at inference time, since this depends on the input image size. To use native PyTorch padding behavior, create a [`MobileNetV2Config`] with `tf_padding = False`. Unsupported features: - The [`MobileNetV2Model`] outputs a globally pooled version of the last hidden state. In the original model it is possible to use an average pooling layer with a fixed 7x7 window and stride 1 instead of global pooling. For inputs that are larger than the recommended image size, this gives a pooled output that is larger than 1x1. The Hugging Face implementation does not support this. - The original TensorFlow checkpoints include quantized models. We do not support these models as they include additional "FakeQuantization" operations to unquantize the weights. - It's common to extract the output from the expansion layers at indices 10 and 13, as well as the output from the final 1x1 convolution layer, for downstream purposes. Using `output_hidden_states=True` returns the output from all intermediate layers. There is currently no way to limit this to specific layers. - The DeepLabV3+ segmentation head does not use the final convolution layer from the backbone, but this layer gets computed anyway. There is currently no way to tell [`MobileNetV2Model`] up to which layer it should run. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with MobileNetV2. <PipelineTag pipeline="image-classification"/> - [`MobileNetV2ForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). 
- See also: [Image classification task guide](../tasks/image_classification) **Semantic segmentation** - [Semantic segmentation task guide](../tasks/semantic_segmentation) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## MobileNetV2Config [[autodoc]] MobileNetV2Config ## MobileNetV2FeatureExtractor [[autodoc]] MobileNetV2FeatureExtractor - preprocess - post_process_semantic_segmentation ## MobileNetV2ImageProcessor [[autodoc]] MobileNetV2ImageProcessor - preprocess - post_process_semantic_segmentation ## MobileNetV2Model [[autodoc]] MobileNetV2Model - forward ## MobileNetV2ForImageClassification [[autodoc]] MobileNetV2ForImageClassification - forward ## MobileNetV2ForSemanticSegmentation [[autodoc]] MobileNetV2ForSemanticSegmentation - forward
transformers/docs/source/en/model_doc/mobilenet_v2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mobilenet_v2.md", "repo_id": "transformers", "token_count": 1747 }
296
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Nyströmformer ## Overview The Nyströmformer model was proposed in [*Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention*](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, and Vikas Singh. The abstract from the paper is the following: *Transformers have emerged as a powerful tool for a broad range of natural language processing tasks. A key component that drives the impressive performance of Transformers is the self-attention mechanism that encodes the influence or dependence of other tokens on each specific token. While beneficial, the quadratic complexity of self-attention on the input sequence length has limited its application to longer sequences -- a topic being actively studied in the community. To address this limitation, we propose Nyströmformer -- a model that exhibits favorable scalability as a function of sequence length. Our idea is based on adapting the Nyström method to approximate standard self-attention with O(n) complexity. The scalability of Nyströmformer enables application to longer sequences with thousands of tokens. 
We perform evaluations on multiple downstream tasks on the GLUE benchmark and IMDB reviews with standard sequence length, and find that our Nyströmformer performs comparably, or in a few cases, even slightly better, than standard self-attention. On longer sequence tasks in the Long Range Arena (LRA) benchmark, Nyströmformer performs favorably relative to other efficient self-attention methods. Our code is available at this https URL.* This model was contributed by [novice03](https://huggingface.co/novice03). The original code can be found [here](https://github.com/mlpen/Nystromformer). ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## NystromformerConfig [[autodoc]] NystromformerConfig ## NystromformerModel [[autodoc]] NystromformerModel - forward ## NystromformerForMaskedLM [[autodoc]] NystromformerForMaskedLM - forward ## NystromformerForSequenceClassification [[autodoc]] NystromformerForSequenceClassification - forward ## NystromformerForMultipleChoice [[autodoc]] NystromformerForMultipleChoice - forward ## NystromformerForTokenClassification [[autodoc]] NystromformerForTokenClassification - forward ## NystromformerForQuestionAnswering [[autodoc]] NystromformerForQuestionAnswering - forward
transformers/docs/source/en/model_doc/nystromformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/nystromformer.md", "repo_id": "transformers", "token_count": 907 }
297
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Phi-3 ## Overview The Phi-3 model was proposed in [Phi-3 Technical Report: A Highly Capable Language Model Locally on Your Phone](https://arxiv.org/abs/2404.14219) by Microsoft. ### Summary The abstract from the Phi-3 paper is the following: We introduce phi-3-mini, a 3.8 billion parameter language model trained on 3.3 trillion tokens, whose overall performance, as measured by both academic benchmarks and internal testing, rivals that of models such as Mixtral 8x7B and GPT-3.5 (e.g., phi-3-mini achieves 69% on MMLU and 8.38 on MT-bench), despite being small enough to be deployed on a phone. The innovation lies entirely in our dataset for training, a scaled-up version of the one used for phi-2, composed of heavily filtered web data and synthetic data. The model is also further aligned for robustness, safety, and chat format. We also provide some initial parameter-scaling results with a 7B and 14B models trained for 4.8T tokens, called phi-3-small and phi-3-medium, both significantly more capable than phi-3-mini (e.g., respectively 75% and 78% on MMLU, and 8.7 and 8.9 on MT-bench). The original code for Phi-3 can be found [here](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct). 
## Usage tips - This model is very similar to `Llama` with the main difference of [`Phi3SuScaledRotaryEmbedding`] and [`Phi3YarnScaledRotaryEmbedding`], where they are used to extend the context of the rotary embeddings. The query, key and values are fused, and the MLP's up and gate projection layers are also fused. - The tokenizer used for this model is identical to the [`LlamaTokenizer`], with the exception of additional tokens. ## How to use Phi-3 <Tip warning={true}> Phi-3 has been integrated in the development version (4.40.0.dev) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. </Tip> ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct") >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct") >>> messages = [{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}] >>> inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt") >>> outputs = model.generate(inputs, max_new_tokens=32) >>> text = tokenizer.batch_decode(outputs)[0] >>> print(text) <s><|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|> <|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. 
Here are some ideas for eating combinations of bananas and ``` ## Phi3Config [[autodoc]] Phi3Config <frameworkcontent> <pt> ## Phi3Model [[autodoc]] Phi3Model - forward ## Phi3ForCausalLM [[autodoc]] Phi3ForCausalLM - forward - generate ## Phi3ForSequenceClassification [[autodoc]] Phi3ForSequenceClassification - forward ## Phi3ForTokenClassification [[autodoc]] Phi3ForTokenClassification - forward </pt> </frameworkcontent>
transformers/docs/source/en/model_doc/phi3.md/0
{ "file_path": "transformers/docs/source/en/model_doc/phi3.md", "repo_id": "transformers", "token_count": 1228 }
298
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # RecurrentGemma ## Overview The Recurrent Gemma model was proposed in [RecurrentGemma: Moving Past Transformers for Efficient Open Language Models](https://storage.googleapis.com/deepmind-media/gemma/recurrentgemma-report.pdf) by the Griffin, RLHF and Gemma Teams of Google. The abstract from the paper is the following: *We introduce RecurrentGemma, an open language model which uses Google’s novel Griffin architecture. Griffin combines linear recurrences with local attention to achieve excellent performance on language. It has a fixed-sized state, which reduces memory use and enables efficient inference on long sequences. We provide a pre-trained model with 2B non-embedding parameters, and an instruction tuned variant. Both models achieve comparable performance to Gemma-2B despite being trained on fewer tokens.* Tips: - The original checkpoints can be converted using the conversion script [`src/transformers/models/recurrent_gemma/convert_recurrent_gemma_weights_to_hf.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/recurrent_gemma/convert_recurrent_gemma_to_hf.py). This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). 
The original code can be found [here](https://github.com/google-deepmind/recurrentgemma). ## RecurrentGemmaConfig [[autodoc]] RecurrentGemmaConfig ## RecurrentGemmaModel [[autodoc]] RecurrentGemmaModel - forward ## RecurrentGemmaForCausalLM [[autodoc]] RecurrentGemmaForCausalLM - forward
transformers/docs/source/en/model_doc/recurrent_gemma.md/0
{ "file_path": "transformers/docs/source/en/model_doc/recurrent_gemma.md", "repo_id": "transformers", "token_count": 608 }
299
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # SegGPT ## Overview The SegGPT model was proposed in [SegGPT: Segmenting Everything In Context](https://arxiv.org/abs/2304.03284) by Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang. SegGPT employs a decoder-only Transformer that can generate a segmentation mask given an input image, a prompt image and its corresponding prompt mask. The model achieves remarkable one-shot results with 56.1 mIoU on COCO-20 and 85.6 mIoU on FSS-1000. The abstract from the paper is the following: *We present SegGPT, a generalist model for segmenting everything in context. We unify various segmentation tasks into a generalist in-context learning framework that accommodates different kinds of segmentation data by transforming them into the same format of images. The training of SegGPT is formulated as an in-context coloring problem with random color mapping for each data sample. The objective is to accomplish diverse tasks according to the context, rather than relying on specific colors. After training, SegGPT can perform arbitrary segmentation tasks in images or videos via in-context inference, such as object instance, stuff, part, contour, and text. 
SegGPT is evaluated on a broad range of tasks, including few-shot semantic segmentation, video object segmentation, semantic segmentation, and panoptic segmentation. Our results show strong capabilities in segmenting in-domain and out-of* Tips: - One can use [`SegGptImageProcessor`] to prepare image input, prompt and mask to the model. - One can either use segmentation maps or RGB images as prompt masks. If using the latter make sure to set `do_convert_rgb=False` in the `preprocess` method. - It's highly advisable to pass `num_labels` when using `segmetantion_maps` (not considering background) during preprocessing and postprocessing with [`SegGptImageProcessor`] for your use case. - When doing inference with [`SegGptForImageSegmentation`] if your `batch_size` is greater than 1 you can use feature ensemble across your images by passing `feature_ensemble=True` in the forward method. Here's how to use the model for one-shot semantic segmentation: ```python import torch from datasets import load_dataset from transformers import SegGptImageProcessor, SegGptForImageSegmentation checkpoint = "BAAI/seggpt-vit-large" image_processor = SegGptImageProcessor.from_pretrained(checkpoint) model = SegGptForImageSegmentation.from_pretrained(checkpoint) dataset_id = "EduardoPacheco/FoodSeg103" ds = load_dataset(dataset_id, split="train") # Number of labels in FoodSeg103 (not including background) num_labels = 103 image_input = ds[4]["image"] ground_truth = ds[4]["label"] image_prompt = ds[29]["image"] mask_prompt = ds[29]["label"] inputs = image_processor( images=image_input, prompt_images=image_prompt, segmentation_maps=mask_prompt, num_labels=num_labels, return_tensors="pt" ) with torch.no_grad(): outputs = model(**inputs) target_sizes = [image_input.size[::-1]] mask = image_processor.post_process_semantic_segmentation(outputs, target_sizes, num_labels=num_labels)[0] ``` This model was contributed by [EduardoPacheco](https://huggingface.co/EduardoPacheco). 
The original code can be found [here]([(https://github.com/baaivision/Painter/tree/main)). ## SegGptConfig [[autodoc]] SegGptConfig ## SegGptImageProcessor [[autodoc]] SegGptImageProcessor - preprocess - post_process_semantic_segmentation ## SegGptModel [[autodoc]] SegGptModel - forward ## SegGptForImageSegmentation [[autodoc]] SegGptForImageSegmentation - forward
transformers/docs/source/en/model_doc/seggpt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/seggpt.md", "repo_id": "transformers", "token_count": 1258 }
300
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ViTMSN ## Overview The ViTMSN model was proposed in [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. The paper presents a joint-embedding architecture to match the prototypes of masked patches with that of the unmasked patches. With this setup, their method yields excellent performance in the low-shot and extreme low-shot regimes. The abstract from the paper is the following: *We propose Masked Siamese Networks (MSN), a self-supervised learning framework for learning image representations. Our approach matches the representation of an image view containing randomly masked patches to the representation of the original unmasked image. This self-supervised pre-training strategy is particularly scalable when applied to Vision Transformers since only the unmasked patches are processed by the network. As a result, MSNs improve the scalability of joint-embedding architectures, while producing representations of a high semantic level that perform competitively on low-shot image classification. 
For instance, on ImageNet-1K, with only 5,000 annotated images, our base MSN model achieves 72.4% top-1 accuracy, and with 1% of ImageNet-1K labels, we achieve 75.7% top-1 accuracy, setting a new state-of-the-art for self-supervised learning on this benchmark.* <img src="https://i.ibb.co/W6PQMdC/Screenshot-2022-09-13-at-9-08-40-AM.png" alt="drawing" width="600"/> <small> MSN architecture. Taken from the <a href="https://arxiv.org/abs/2204.07141">original paper.</a> </small> This model was contributed by [sayakpaul](https://huggingface.co/sayakpaul). The original code can be found [here](https://github.com/facebookresearch/msn). ## Usage tips - MSN (masked siamese networks) is a method for self-supervised pre-training of Vision Transformers (ViTs). The pre-training objective is to match the prototypes assigned to the unmasked views of the images to that of the masked views of the same images. - The authors have only released pre-trained weights of the backbone (ImageNet-1k pre-training). So, to use that on your own image classification dataset, use the [`ViTMSNForImageClassification`] class which is initialized from [`ViTMSNModel`]. Follow [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb) for a detailed tutorial on fine-tuning. - MSN is particularly useful in the low-shot and extreme low-shot regimes. Notably, it achieves 75.7% top-1 accuracy with only 1% of ImageNet-1K labels when fine-tuned. ### Using Scaled Dot Product Attention (SDPA) PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function encompasses several implementations that can be applied depending on the inputs and the hardware in use. 
See the [official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) page for more information. SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used. ``` from transformers import ViTMSNForImageClassification model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-base", attn_implementation="sdpa", torch_dtype=torch.float16) ... ``` For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`). On a local benchmark (A100-40GB, PyTorch 2.3.0, OS Ubuntu 22.04) with `float32` and `facebook/vit-msn-base` model, we saw the following speedups during inference. | Batch size | Average inference time (ms), eager mode | Average inference time (ms), sdpa model | Speed up, Sdpa / Eager (x) | |--------------|-------------------------------------------|-------------------------------------------|------------------------------| | 1 | 7 | 6 | 1.17 | | 2 | 8 | 6 | 1.33 | | 4 | 8 | 6 | 1.33 | | 8 | 8 | 6 | 1.33 | ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViT MSN. <PipelineTag pipeline="image-classification"/> - [`ViTMSNForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! 
The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ViTMSNConfig [[autodoc]] ViTMSNConfig ## ViTMSNModel [[autodoc]] ViTMSNModel - forward ## ViTMSNForImageClassification [[autodoc]] ViTMSNForImageClassification - forward
transformers/docs/source/en/model_doc/vit_msn.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vit_msn.md", "repo_id": "transformers", "token_count": 2134 }
301
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # XLM-V ## Overview XLM-V is multilingual language model with a one million token vocabulary trained on 2.5TB of data from Common Crawl (same as XLM-R). It was introduced in the [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) paper by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer and Madian Khabsa. From the abstract of the XLM-V paper: *Large multilingual language models typically rely on a single vocabulary shared across 100+ languages. As these models have increased in parameter count and depth, vocabulary size has remained largely unchanged. This vocabulary bottleneck limits the representational capabilities of multilingual models like XLM-R. In this paper, we introduce a new approach for scaling to very large multilingual vocabularies by de-emphasizing token sharing between languages with little lexical overlap and assigning vocabulary capacity to achieve sufficient coverage for each individual language. Tokenizations using our vocabulary are typically more semantically meaningful and shorter compared to XLM-R. 
Leveraging this improved vocabulary, we train XLM-V, a multilingual language model with a one million token vocabulary. XLM-V outperforms XLM-R on every task we tested on ranging from natural language inference (XNLI), question answering (MLQA, XQuAD, TyDiQA), and named entity recognition (WikiAnn) to low-resource tasks (Americas NLI, MasakhaNER).* This model was contributed by [stefan-it](https://huggingface.co/stefan-it), including detailed experiments with XLM-V on downstream tasks. The experiments repository can be found [here](https://github.com/stefan-it/xlm-v-experiments). ## Usage tips - XLM-V is compatible with the XLM-RoBERTa model architecture, only model weights from [`fairseq`](https://github.com/facebookresearch/fairseq) library had to be converted. - The `XLMTokenizer` implementation is used to load the vocab and performs tokenization. A XLM-V (base size) model is available under the [`facebook/xlm-v-base`](https://huggingface.co/facebook/xlm-v-base) identifier. <Tip> XLM-V architecture is the same as XLM-RoBERTa, refer to [XLM-RoBERTa documentation](xlm-roberta) for API reference, and examples. </Tip>
transformers/docs/source/en/model_doc/xlm-v.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm-v.md", "repo_id": "transformers", "token_count": 809 }
302
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Custom hardware for training The hardware you use to run model training and inference can have a big effect on performance. For a deep dive into GPUs make sure to check out Tim Dettmer's excellent [blog post](https://timdettmers.com/2020/09/07/which-gpu-for-deep-learning/). Let's have a look at some practical advice for GPU setups. ## GPU When you train bigger models you have essentially three options: - bigger GPUs - more GPUs - more CPU and NVMe (offloaded to by [DeepSpeed-Infinity](main_classes/deepspeed#nvme-support)) Let's start at the case where you have a single GPU. ### Power and Cooling If you bought an expensive high end GPU make sure you give it the correct power and sufficient cooling. **Power**: Some high end consumer GPU cards have 2 and sometimes 3 PCI-E 8-Pin power sockets. Make sure you have as many independent 12V PCI-E 8-Pin cables plugged into the card as there are sockets. Do not use the 2 splits at one end of the same cable (also known as pigtail cable). That is if you have 2 sockets on the GPU, you want 2 PCI-E 8-Pin cables going from your PSU to the card and not one that has 2 PCI-E 8-Pin connectors at the end! You won't get the full performance out of your card otherwise. 
Each PCI-E 8-Pin power cable needs to be plugged into a 12V rail on the PSU side and can supply up to 150W of power. Some other cards may use a PCI-E 12-Pin connectors, and these can deliver up to 500-600W of power. Low end cards may use 6-Pin connectors, which supply up to 75W of power. Additionally you want the high-end PSU that has stable voltage. Some lower quality ones may not give the card the stable voltage it needs to function at its peak. And of course the PSU needs to have enough unused Watts to power the card. **Cooling**: When a GPU gets overheated it will start throttling down and will not deliver full performance and it can even shutdown if it gets too hot. It's hard to tell the exact best temperature to strive for when a GPU is heavily loaded, but probably anything under +80C is good, but lower is better - perhaps 70-75C is an excellent range to be in. The throttling down is likely to start at around 84-90C. But other than throttling performance a prolonged very high temperature is likely to reduce the lifespan of a GPU. Next let's have a look at one of the most important aspects when having multiple GPUs: connectivity. ### Multi-GPU Connectivity If you use multiple GPUs the way cards are inter-connected can have a huge impact on the total training time. If the GPUs are on the same physical node, you can run: ```bash nvidia-smi topo -m ``` and it will tell you how the GPUs are inter-connected. 
On a machine with dual-GPU and which are connected with NVLink, you will most likely see something like: ``` GPU0 GPU1 CPU Affinity NUMA Affinity GPU0 X NV2 0-23 N/A GPU1 NV2 X 0-23 N/A ``` on a different machine w/o NVLink we may see: ``` GPU0 GPU1 CPU Affinity NUMA Affinity GPU0 X PHB 0-11 N/A GPU1 PHB X 0-11 N/A ``` The report includes this legend: ``` X = Self SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI) NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU) PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge) PIX = Connection traversing at most a single PCIe bridge NV# = Connection traversing a bonded set of # NVLinks ``` So the first report `NV2` tells us the GPUs are interconnected with 2 NVLinks, and the second report `PHB` we have a typical consumer-level PCIe+Bridge setup. Check what type of connectivity you have on your setup. Some of these will make the communication between cards faster (e.g. NVLink), others slower (e.g. PHB). Depending on the type of scalability solution used, the connectivity speed could have a major or a minor impact. If the GPUs need to sync rarely, as in DDP, the impact of a slower connection will be less significant. If the GPUs need to send messages to each other often, as in ZeRO-DP, then faster connectivity becomes super important to achieve faster training. #### NVlink [NVLink](https://en.wikipedia.org/wiki/NVLink) is a wire-based serial multi-lane near-range communications link developed by Nvidia. Each new generation provides a faster bandwidth, e.g. 
here is a quote from [Nvidia Ampere GA102 GPU Architecture](https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf): > Third-Generation NVLink® > GA102 GPUs utilize NVIDIA’s third-generation NVLink interface, which includes four x4 links, > with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four > links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth > between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink. > (Note that 3-Way and 4-Way SLI configurations are not supported.) So the higher `X` you get in the report of `NVX` in the output of `nvidia-smi topo -m` the better. The generation will depend on your GPU architecture. Let's compare the execution of an openai-community/gpt2 language model training over a small sample of wikitext. The results are: | NVlink | Time | | ----- | ---: | | Y | 101s | | N | 131s | You can see that NVLink completes the training ~23% faster. In the second benchmark we use `NCCL_P2P_DISABLE=1` to tell the GPUs not to use NVLink. 
Here is the full benchmark code and outputs: ```bash # DDP w/ NVLink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \ --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69} # DDP w/o NVLink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69} ``` Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`) Software: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0`
transformers/docs/source/en/perf_hardware.md/0
{ "file_path": "transformers/docs/source/en/perf_hardware.md", "repo_id": "transformers", "token_count": 2317 }
303
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Preprocess [[open-in-colab]] Before you can train a model on a dataset, it needs to be preprocessed into the expected model input format. Whether your data is text, images, or audio, they need to be converted and assembled into batches of tensors. 🤗 Transformers provides a set of preprocessing classes to help prepare your data for the model. In this tutorial, you'll learn that for: * Text, use a [Tokenizer](./main_classes/tokenizer) to convert text into a sequence of tokens, create a numerical representation of the tokens, and assemble them into tensors. * Speech and audio, use a [Feature extractor](./main_classes/feature_extractor) to extract sequential features from audio waveforms and convert them into tensors. * Image inputs use a [ImageProcessor](./main_classes/image_processor) to convert images into tensors. * Multimodal inputs, use a [Processor](./main_classes/processors) to combine a tokenizer and a feature extractor or image processor. <Tip> `AutoProcessor` **always** works and automatically chooses the correct class for the model you're using, whether you're using a tokenizer, image processor, feature extractor or processor. 
</Tip> Before you begin, install 🤗 Datasets so you can load some datasets to experiment with: ```bash pip install datasets ``` ## Natural Language Processing <Youtube id="Yffk5aydLzg"/> The main tool for preprocessing textual data is a [tokenizer](main_classes/tokenizer). A tokenizer splits text into *tokens* according to a set of rules. The tokens are converted into numbers and then tensors, which become the model inputs. Any additional inputs required by the model are added by the tokenizer. <Tip> If you plan on using a pretrained model, it's important to use the associated pretrained tokenizer. This ensures the text is split the same way as the pretraining corpus, and uses the same corresponding tokens-to-index (usually referred to as the *vocab*) during pretraining. </Tip> Get started by loading a pretrained tokenizer with the [`AutoTokenizer.from_pretrained`] method. This downloads the *vocab* a model was pretrained with: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") ``` Then pass your text to the tokenizer: ```py >>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") >>> print(encoded_input) {'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` The tokenizer returns a dictionary with three important items: * [input_ids](glossary#input-ids) are the indices corresponding to each token in the sentence. * [attention_mask](glossary#attention-mask) indicates whether a token should be attended to or not. * [token_type_ids](glossary#token-type-ids) identifies which sequence a token belongs to when there is more than one sequence. 
Return your input by decoding the `input_ids`: ```py >>> tokenizer.decode(encoded_input["input_ids"]) '[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]' ``` As you can see, the tokenizer added two special tokens - `CLS` and `SEP` (classifier and separator) - to the sentence. Not all models need special tokens, but if they do, the tokenizer automatically adds them for you. If there are several sentences you want to preprocess, pass them as a list to the tokenizer: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_inputs = tokenizer(batch_sentences) >>> print(encoded_inputs) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]} ``` ### Pad Sentences aren't always the same length which can be an issue because tensors, the model inputs, need to have a uniform shape. Padding is a strategy for ensuring tensors are rectangular by adding a special *padding token* to shorter sentences. Set the `padding` parameter to `True` to pad the shorter sequences in the batch to match the longest sequence: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_input = tokenizer(batch_sentences, padding=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` The first and third sentences are now padded with `0`'s because they are shorter. ### Truncation On the other end of the spectrum, sometimes a sequence may be too long for a model to handle. In this case, you'll need to truncate the sequence to a shorter length. Set the `truncation` parameter to `True` to truncate a sequence to the maximum length accepted by the model: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` <Tip> Check out the [Padding and truncation](./pad_truncation) concept guide to learn more different padding and truncation arguments. 
</Tip> ### Build tensors Finally, you want the tokenizer to return the actual tensors that get fed to the model. Set the `return_tensors` parameter to either `pt` for PyTorch, or `tf` for TensorFlow: <frameworkcontent> <pt> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") >>> print(encoded_input) {'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` </pt> <tf> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") >>> print(encoded_input) {'input_ids': <tf.Tensor: shape=(3, 15), dtype=int32, numpy= array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(3, 15), dtype=int32, numpy= array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(3, 15), dtype=int32, numpy= array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>} ``` </tf> </frameworkcontent> <Tip> Different pipelines support tokenizer arguments in their `__call__()` differently. `text-2-text-generation` pipelines support (i.e. pass on) only `truncation`. `text-generation` pipelines support `max_length`, `truncation`, `padding` and `add_special_tokens`. In `fill-mask` pipelines, tokenizer arguments can be passed in the `tokenizer_kwargs` argument (dictionary). </Tip> ## Audio For audio tasks, you'll need a [feature extractor](main_classes/feature_extractor) to prepare your dataset for the model. The feature extractor is designed to extract features from raw audio data, and convert them into tensors. 
Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use a feature extractor with audio datasets: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` Access the first element of the `audio` column to take a look at the input. Calling the `audio` column automatically loads and resamples the audio file: ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` This returns three items: * `array` is the speech signal loaded - and potentially resampled - as a 1D array. * `path` points to the location of the audio file. * `sampling_rate` refers to how many data points in the speech signal are measured per second. For this tutorial, you'll use the [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model. Take a look at the model card, and you'll learn Wav2Vec2 is pretrained on 16kHz sampled speech audio. It is important your audio data's sampling rate matches the sampling rate of the dataset used to pretrain the model. If your data's sampling rate isn't the same, then you need to resample your data. 1. Use 🤗 Datasets' [`~datasets.Dataset.cast_column`] method to upsample the sampling rate to 16kHz: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) ``` 2. 
Call the `audio` column again to resample the audio file: ```py >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` Next, load a feature extractor to normalize and pad the input. When padding textual data, a `0` is added for shorter sequences. The same idea applies to audio data. The feature extractor adds a `0` - interpreted as silence - to `array`. Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` Pass the audio `array` to the feature extractor. We also recommend adding the `sampling_rate` argument in the feature extractor in order to better debug any silent errors that may occur. ```py >>> audio_input = [dataset[0]["audio"]["array"]] >>> feature_extractor(audio_input, sampling_rate=16000) {'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ..., 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} ``` Just like the tokenizer, you can apply padding or truncation to handle variable sequences in a batch. Take a look at the sequence length of these two audio samples: ```py >>> dataset[0]["audio"]["array"].shape (173398,) >>> dataset[1]["audio"]["array"].shape (106496,) ``` Create a function to preprocess the dataset so the audio samples are the same lengths. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it: ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, ... sampling_rate=16000, ... 
padding=True, ... max_length=100000, ... truncation=True, ... ) ... return inputs ``` Apply the `preprocess_function` to the first few examples in the dataset: ```py >>> processed_dataset = preprocess_function(dataset[:5]) ``` The sample lengths are now the same and match the specified maximum length. You can pass your processed dataset to the model now! ```py >>> processed_dataset["input_values"][0].shape (100000,) >>> processed_dataset["input_values"][1].shape (100000,) ``` ## Computer vision For computer vision tasks, you'll need an [image processor](main_classes/image_processor) to prepare your dataset for the model. Image preprocessing consists of several steps that convert images into the input expected by the model. These steps include but are not limited to resizing, normalizing, color channel correction, and converting images to tensors. <Tip> Image preprocessing often follows some form of image augmentation. Both image preprocessing and image augmentation transform image data, but they serve different purposes: * Image augmentation alters images in a way that can help prevent overfitting and increase the robustness of the model. You can get creative in how you augment your data - adjust brightness and colors, crop, rotate, resize, zoom, etc. However, be mindful not to change the meaning of the images with your augmentations. * Image preprocessing guarantees that the images match the model’s expected input format. When fine-tuning a computer vision model, images must be preprocessed exactly as when the model was initially trained. You can use any library you like for image augmentation. For image preprocessing, use the `ImageProcessor` associated with the model. 
</Tip> Load the [food101](https://huggingface.co/datasets/food101) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use an image processor with computer vision datasets: <Tip> Use 🤗 Datasets `split` parameter to only load a small sample from the training split since the dataset is quite large! </Tip> ```py >>> from datasets import load_dataset >>> dataset = load_dataset("food101", split="train[:100]") ``` Next, take a look at the image with 🤗 Datasets [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) feature: ```py >>> dataset[0]["image"] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png"/> </div> Load the image processor with [`AutoImageProcessor.from_pretrained`]: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` First, let's add some image augmentation. You can use any library you prefer, but in this tutorial, we'll use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module. If you're interested in using another data augmentation library, learn how in the [Albumentations](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) or [Kornia notebooks](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb). 1. 
Here we use [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) to chain together a couple of transforms - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) and [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html). Note that for resizing, we can get the image size requirements from the `image_processor`. For some models, an exact height and width are expected, for others only the `shortest_edge` is defined. ```py >>> from torchvision.transforms import RandomResizedCrop, ColorJitter, Compose >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... ) >>> _transforms = Compose([RandomResizedCrop(size), ColorJitter(brightness=0.5, hue=0.5)]) ``` 2. The model accepts [`pixel_values`](model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) as its input. `ImageProcessor` can take care of normalizing the images, and generating appropriate tensors. Create a function that combines image augmentation and image preprocessing for a batch of images and generates `pixel_values`: ```py >>> def transforms(examples): ... images = [_transforms(img.convert("RGB")) for img in examples["image"]] ... examples["pixel_values"] = image_processor(images, do_resize=False, return_tensors="pt")["pixel_values"] ... return examples ``` <Tip> In the example above we set `do_resize=False` because we have already resized the images in the image augmentation transformation, and leveraged the `size` attribute from the appropriate `image_processor`. If you do not resize images during image augmentation, leave this parameter out. By default, `ImageProcessor` will handle the resizing. 
If you wish to normalize images as a part of the augmentation transformation, use the `image_processor.image_mean`, and `image_processor.image_std` values. </Tip> 3. Then use 🤗 Datasets [`~datasets.Dataset.set_transform`] to apply the transforms on the fly: ```py >>> dataset.set_transform(transforms) ``` 4. Now when you access the image, you'll notice the image processor has added `pixel_values`. You can pass your processed dataset to the model now! ```py >>> dataset[0].keys() ``` Here is what the image looks like after the transforms are applied. The image has been randomly cropped and its color properties are different. ```py >>> import numpy as np >>> import matplotlib.pyplot as plt >>> img = dataset[0]["pixel_values"] >>> plt.imshow(img.permute(1, 2, 0)) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png"/> </div> <Tip> For tasks like object detection, semantic segmentation, instance segmentation, and panoptic segmentation, `ImageProcessor` offers post processing methods. These methods convert the model's raw outputs into meaningful predictions such as bounding boxes, or segmentation maps. </Tip> ### Pad In some cases, for instance, when fine-tuning [DETR](./model_doc/detr), the model applies scale augmentation at training time. This may cause images to be different sizes in a batch. You can use [`DetrImageProcessor.pad`] from [`DetrImageProcessor`] and define a custom `collate_fn` to batch images together. ```py >>> def collate_fn(batch): ... pixel_values = [item["pixel_values"] for item in batch] ... encoding = image_processor.pad(pixel_values, return_tensors="pt") ... labels = [item["labels"] for item in batch] ... batch = {} ... batch["pixel_values"] = encoding["pixel_values"] ... batch["pixel_mask"] = encoding["pixel_mask"] ... batch["labels"] = labels ... 
return batch ``` ## Multimodal For tasks involving multimodal inputs, you'll need a [processor](main_classes/processors) to prepare your dataset for the model. A processor couples together two processing objects such as tokenizer and feature extractor. Load the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use a processor for automatic speech recognition (ASR): ```py >>> from datasets import load_dataset >>> lj_speech = load_dataset("lj_speech", split="train") ``` For ASR, you're mainly focused on `audio` and `text` so you can remove the other columns: ```py >>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"]) ``` Now take a look at the `audio` and `text` columns: ```py >>> lj_speech[0]["audio"] {'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ..., 7.3242188e-04, 2.1362305e-04, 6.1035156e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav', 'sampling_rate': 22050} >>> lj_speech[0]["text"] 'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition' ``` Remember you should always [resample](preprocessing#audio) your audio dataset's sampling rate to match the sampling rate of the dataset used to pretrain a model! ```py >>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000)) ``` Load a processor with [`AutoProcessor.from_pretrained`]: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") ``` 1. Create a function to process the audio data contained in `array` to `input_values`, and tokenize `text` to `labels`. 
These are the inputs to the model: ```py >>> def prepare_dataset(example): ... audio = example["audio"] ... example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000)) ... return example ``` 2. Apply the `prepare_dataset` function to a sample: ```py >>> prepare_dataset(lj_speech[0]) ``` The processor has now added `input_values` and `labels`, and the sampling rate has also been correctly downsampled to 16kHz. You can pass your processed dataset to the model now!
transformers/docs/source/en/preprocessing.md/0
{ "file_path": "transformers/docs/source/en/preprocessing.md", "repo_id": "transformers", "token_count": 8688 }
304
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Export to ONNX Deploying 🤗 Transformers models in production environments often requires, or can benefit from exporting the models into a serialized format that can be loaded and executed on specialized runtimes and hardware. 🤗 Optimum is an extension of Transformers that enables exporting models from PyTorch or TensorFlow to serialized formats such as ONNX and TFLite through its `exporters` module. 🤗 Optimum also provides a set of performance optimization tools to train and run models on targeted hardware with maximum efficiency. This guide demonstrates how you can export 🤗 Transformers models to ONNX with 🤗 Optimum, for the guide on exporting models to TFLite, please refer to the [Export to TFLite page](tflite). ## Export to ONNX [ONNX (Open Neural Network eXchange)](http://onnx.ai) is an open standard that defines a common set of operators and a common file format to represent deep learning models in a wide variety of frameworks, including PyTorch and TensorFlow. When a model is exported to the ONNX format, these operators are used to construct a computational graph (often called an _intermediate representation_) which represents the flow of data through the neural network. 
By exposing a graph with standardized operators and data types, ONNX makes it easy to switch between frameworks. For example, a model trained in PyTorch can be exported to ONNX format and then imported in TensorFlow (and vice versa). Once exported to ONNX format, a model can be: - optimized for inference via techniques such as [graph optimization](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/optimization) and [quantization](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/quantization). - run with ONNX Runtime via [`ORTModelForXXX` classes](https://huggingface.co/docs/optimum/onnxruntime/package_reference/modeling_ort), which follow the same `AutoModel` API as the one you are used to in 🤗 Transformers. - run with [optimized inference pipelines](https://huggingface.co/docs/optimum/main/en/onnxruntime/usage_guides/pipelines), which has the same API as the [`pipeline`] function in 🤗 Transformers. 🤗 Optimum provides support for the ONNX export by leveraging configuration objects. These configuration objects come ready-made for a number of model architectures, and are designed to be easily extendable to other architectures. For the list of ready-made configurations, please refer to [🤗 Optimum documentation](https://huggingface.co/docs/optimum/exporters/onnx/overview). There are two ways to export a 🤗 Transformers model to ONNX, here we show both: - export with 🤗 Optimum via CLI. - export with 🤗 Optimum with `optimum.onnxruntime`. 
### Exporting a 🤗 Transformers model to ONNX with CLI To export a 🤗 Transformers model to ONNX, first install an extra dependency: ```bash pip install optimum[exporters] ``` To check out all available arguments, refer to the [🤗 Optimum docs](https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli), or view help in command line: ```bash optimum-cli export onnx --help ``` To export a model's checkpoint from the 🤗 Hub, for example, `distilbert/distilbert-base-uncased-distilled-squad`, run the following command: ```bash optimum-cli export onnx --model distilbert/distilbert-base-uncased-distilled-squad distilbert_base_uncased_squad_onnx/ ``` You should see the logs indicating progress and showing where the resulting `model.onnx` is saved, like this: ```bash Validating ONNX model distilbert_base_uncased_squad_onnx/model.onnx... -[✓] ONNX model output names match reference model (start_logits, end_logits) - Validating ONNX Model output "start_logits": -[✓] (2, 16) matches (2, 16) -[✓] all values close (atol: 0.0001) - Validating ONNX Model output "end_logits": -[✓] (2, 16) matches (2, 16) -[✓] all values close (atol: 0.0001) The ONNX export succeeded and the exported model was saved at: distilbert_base_uncased_squad_onnx ``` The example above illustrates exporting a checkpoint from 🤗 Hub. When exporting a local model, first make sure that you saved both the model's weights and tokenizer files in the same directory (`local_path`). When using CLI, pass the `local_path` to the `model` argument instead of the checkpoint name on 🤗 Hub and provide the `--task` argument. You can review the list of supported tasks in the [🤗 Optimum documentation](https://huggingface.co/docs/optimum/exporters/task_manager). If `task` argument is not provided, it will default to the model architecture without any task specific head. 
```bash optimum-cli export onnx --model local_path --task question-answering distilbert_base_uncased_squad_onnx/ ``` The resulting `model.onnx` file can then be run on one of the [many accelerators](https://onnx.ai/supported-tools.html#deployModel) that support the ONNX standard. For example, we can load and run the model with [ONNX Runtime](https://onnxruntime.ai/) as follows: ```python >>> from transformers import AutoTokenizer >>> from optimum.onnxruntime import ORTModelForQuestionAnswering >>> tokenizer = AutoTokenizer.from_pretrained("distilbert_base_uncased_squad_onnx") >>> model = ORTModelForQuestionAnswering.from_pretrained("distilbert_base_uncased_squad_onnx") >>> inputs = tokenizer("What am I using?", "Using DistilBERT with ONNX Runtime!", return_tensors="pt") >>> outputs = model(**inputs) ``` The process is identical for TensorFlow checkpoints on the Hub. For instance, here's how you would export a pure TensorFlow checkpoint from the [Keras organization](https://huggingface.co/keras-io): ```bash optimum-cli export onnx --model keras-io/transformers-qa distilbert_base_cased_squad_onnx/ ``` ### Exporting a 🤗 Transformers model to ONNX with `optimum.onnxruntime` Alternative to CLI, you can export a 🤗 Transformers model to ONNX programmatically like so: ```python >>> from optimum.onnxruntime import ORTModelForSequenceClassification >>> from transformers import AutoTokenizer >>> model_checkpoint = "distilbert_base_uncased_squad" >>> save_directory = "onnx/" >>> # Load a model from transformers and export it to ONNX >>> ort_model = ORTModelForSequenceClassification.from_pretrained(model_checkpoint, export=True) >>> tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) >>> # Save the onnx model and tokenizer >>> ort_model.save_pretrained(save_directory) >>> tokenizer.save_pretrained(save_directory) ``` ### Exporting a model for an unsupported architecture If you wish to contribute by adding support for a model that cannot be currently exported, you should 
first check if it is supported in [`optimum.exporters.onnx`](https://huggingface.co/docs/optimum/exporters/onnx/overview), and if it is not, [contribute to 🤗 Optimum](https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/contribute) directly. ### Exporting a model with `transformers.onnx` <Tip warning={true}> `transformers.onnx` is no longer maintained, please export models with 🤗 Optimum as described above. This section will be removed in future versions. </Tip> To export a 🤗 Transformers model to ONNX with `transformers.onnx`, install extra dependencies: ```bash pip install transformers[onnx] ``` Use the `transformers.onnx` package as a Python module to export a checkpoint using a ready-made configuration: ```bash python -m transformers.onnx --model=distilbert/distilbert-base-uncased onnx/ ``` This exports an ONNX graph of the checkpoint defined by the `--model` argument. Pass any checkpoint on the 🤗 Hub or one that's stored locally. The resulting `model.onnx` file can then be run on one of the many accelerators that support the ONNX standard. For example, load and run the model with ONNX Runtime as follows: ```python >>> from transformers import AutoTokenizer >>> from onnxruntime import InferenceSession >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") >>> session = InferenceSession("onnx/model.onnx") >>> # ONNX Runtime expects NumPy arrays as input >>> inputs = tokenizer("Using DistilBERT with ONNX Runtime!", return_tensors="np") >>> outputs = session.run(output_names=["last_hidden_state"], input_feed=dict(inputs)) ``` The required output names (like `["last_hidden_state"]`) can be obtained by taking a look at the ONNX configuration of each model. 
For example, for DistilBERT we have: ```python >>> from transformers.models.distilbert import DistilBertConfig, DistilBertOnnxConfig >>> config = DistilBertConfig() >>> onnx_config = DistilBertOnnxConfig(config) >>> print(list(onnx_config.outputs.keys())) ["last_hidden_state"] ``` The process is identical for TensorFlow checkpoints on the Hub. For example, export a pure TensorFlow checkpoint like so: ```bash python -m transformers.onnx --model=keras-io/transformers-qa onnx/ ``` To export a model that's stored locally, save the model's weights and tokenizer files in the same directory (e.g. `local-pt-checkpoint`), then export it to ONNX by pointing the `--model` argument of the `transformers.onnx` package to the desired directory: ```bash python -m transformers.onnx --model=local-pt-checkpoint onnx/ ```
transformers/docs/source/en/serialization.md/0
{ "file_path": "transformers/docs/source/en/serialization.md", "repo_id": "transformers", "token_count": 2972 }
305
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Multiple choice [[open-in-colab]] A multiple choice task is similar to question answering, except several candidate answers are provided along with a context and the model is trained to select the correct answer. This guide will show you how to: 1. Finetune [BERT](https://huggingface.co/google-bert/bert-base-uncased) on the `regular` configuration of the [SWAG](https://huggingface.co/datasets/swag) dataset to select the best answer given multiple options and some context. 2. Use your finetuned model for inference. Before you begin, make sure you have all the necessary libraries installed: ```bash pip install transformers datasets evaluate ``` We encourage you to login to your Hugging Face account so you can upload and share your model with the community. 
When prompted, enter your token to login: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load SWAG dataset Start by loading the `regular` configuration of the SWAG dataset from the 🤗 Datasets library: ```py >>> from datasets import load_dataset >>> swag = load_dataset("swag", "regular") ``` Then take a look at an example: ```py >>> swag["train"][0] {'ending0': 'passes by walking down the street playing their instruments.', 'ending1': 'has heard approaching them.', 'ending2': "arrives and they're outside dancing and asleep.", 'ending3': 'turns the lead singer watches the performance.', 'fold-ind': '3416', 'gold-source': 'gold', 'label': 0, 'sent1': 'Members of the procession walk down the street holding small horn brass instruments.', 'sent2': 'A drum line', 'startphrase': 'Members of the procession walk down the street holding small horn brass instruments. A drum line', 'video-id': 'anetv_jkn6uvmqwh4'} ``` While it looks like there are a lot of fields here, it is actually pretty straightforward: - `sent1` and `sent2`: these fields show how a sentence starts, and if you put the two together, you get the `startphrase` field. - `ending`: suggests a possible ending for how a sentence can end, but only one of them is correct. - `label`: identifies the correct sentence ending. ## Preprocess The next step is to load a BERT tokenizer to process the sentence starts and the four possible endings: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") ``` The preprocessing function you want to create needs to: 1. Make four copies of the `sent1` field and combine each of them with `sent2` to recreate how a sentence starts. 2. Combine `sent2` with each of the four possible sentence endings. 3. Flatten these two lists so you can tokenize them, and then unflatten them afterward so each example has a corresponding `input_ids`, `attention_mask`, and `labels` field. 
```py >>> ending_names = ["ending0", "ending1", "ending2", "ending3"] >>> def preprocess_function(examples): ... first_sentences = [[context] * 4 for context in examples["sent1"]] ... question_headers = examples["sent2"] ... second_sentences = [ ... [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers) ... ] ... first_sentences = sum(first_sentences, []) ... second_sentences = sum(second_sentences, []) ... tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True) ... return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()} ``` To apply the preprocessing function over the entire dataset, use 🤗 Datasets [`~datasets.Dataset.map`] method. You can speed up the `map` function by setting `batched=True` to process multiple elements of the dataset at once: ```py tokenized_swag = swag.map(preprocess_function, batched=True) ``` 🤗 Transformers doesn't have a data collator for multiple choice, so you'll need to adapt the [`DataCollatorWithPadding`] to create a batch of examples. It's more efficient to *dynamically pad* the sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length. `DataCollatorForMultipleChoice` flattens all the model inputs, applies padding, and then unflattens the results: <frameworkcontent> <pt> ```py >>> from dataclasses import dataclass >>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy >>> from typing import Optional, Union >>> import torch >>> @dataclass ... class DataCollatorForMultipleChoice: ... """ ... Data collator that will dynamically pad the inputs for multiple choice received. ... """ ... tokenizer: PreTrainedTokenizerBase ... padding: Union[bool, str, PaddingStrategy] = True ... max_length: Optional[int] = None ... pad_to_multiple_of: Optional[int] = None ... def __call__(self, features): ... 
label_name = "label" if "label" in features[0].keys() else "labels" ... labels = [feature.pop(label_name) for feature in features] ... batch_size = len(features) ... num_choices = len(features[0]["input_ids"]) ... flattened_features = [ ... [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ... ] ... flattened_features = sum(flattened_features, []) ... batch = self.tokenizer.pad( ... flattened_features, ... padding=self.padding, ... max_length=self.max_length, ... pad_to_multiple_of=self.pad_to_multiple_of, ... return_tensors="pt", ... ) ... batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()} ... batch["labels"] = torch.tensor(labels, dtype=torch.int64) ... return batch ``` </pt> <tf> ```py >>> from dataclasses import dataclass >>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy >>> from typing import Optional, Union >>> import tensorflow as tf >>> @dataclass ... class DataCollatorForMultipleChoice: ... """ ... Data collator that will dynamically pad the inputs for multiple choice received. ... """ ... tokenizer: PreTrainedTokenizerBase ... padding: Union[bool, str, PaddingStrategy] = True ... max_length: Optional[int] = None ... pad_to_multiple_of: Optional[int] = None ... def __call__(self, features): ... label_name = "label" if "label" in features[0].keys() else "labels" ... labels = [feature.pop(label_name) for feature in features] ... batch_size = len(features) ... num_choices = len(features[0]["input_ids"]) ... flattened_features = [ ... [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features ... ] ... flattened_features = sum(flattened_features, []) ... batch = self.tokenizer.pad( ... flattened_features, ... padding=self.padding, ... max_length=self.max_length, ... pad_to_multiple_of=self.pad_to_multiple_of, ... return_tensors="tf", ... ) ... 
batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()} ... batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64) ... return batch ``` </tf> </frameworkcontent> ## Evaluate Including a metric during training is often helpful for evaluating your model's performance. You can quickly load a evaluation method with the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric): ```py >>> import evaluate >>> accuracy = evaluate.load("accuracy") ``` Then create a function that passes your predictions and labels to [`~evaluate.EvaluationModule.compute`] to calculate the accuracy: ```py >>> import numpy as np >>> def compute_metrics(eval_pred): ... predictions, labels = eval_pred ... predictions = np.argmax(predictions, axis=1) ... return accuracy.compute(predictions=predictions, references=labels) ``` Your `compute_metrics` function is ready to go now, and you'll return to it when you setup your training. ## Train <frameworkcontent> <pt> <Tip> If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#train-with-pytorch-trainer)! </Tip> You're ready to start training your model now! Load BERT with [`AutoModelForMultipleChoice`]: ```py >>> from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer >>> model = AutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased") ``` At this point, only three steps remain: 1. Define your training hyperparameters in [`TrainingArguments`]. The only required parameter is `output_dir` which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). 
At the end of each epoch, the [`Trainer`] will evaluate the accuracy and save the training checkpoint. 2. Pass the training arguments to [`Trainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function. 3. Call [`~Trainer.train`] to finetune your model. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_swag_model", ... eval_strategy="epoch", ... save_strategy="epoch", ... load_best_model_at_end=True, ... learning_rate=5e-5, ... per_device_train_batch_size=16, ... per_device_eval_batch_size=16, ... num_train_epochs=3, ... weight_decay=0.01, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=tokenized_swag["train"], ... eval_dataset=tokenized_swag["validation"], ... tokenizer=tokenizer, ... data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer), ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` Once training is completed, share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model: ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> If you aren't familiar with finetuning a model with Keras, take a look at the basic tutorial [here](../training#train-a-tensorflow-model-with-keras)! 
</Tip> To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters: ```py >>> from transformers import create_optimizer >>> batch_size = 16 >>> num_train_epochs = 2 >>> total_train_steps = (len(tokenized_swag["train"]) // batch_size) * num_train_epochs >>> optimizer, schedule = create_optimizer(init_lr=5e-5, num_warmup_steps=0, num_train_steps=total_train_steps) ``` Then you can load BERT with [`TFAutoModelForMultipleChoice`]: ```py >>> from transformers import TFAutoModelForMultipleChoice >>> model = TFAutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased") ``` Convert your datasets to the `tf.data.Dataset` format with [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]: ```py >>> data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer) >>> tf_train_set = model.prepare_tf_dataset( ... tokenized_swag["train"], ... shuffle=True, ... batch_size=batch_size, ... collate_fn=data_collator, ... ) >>> tf_validation_set = model.prepare_tf_dataset( ... tokenized_swag["validation"], ... shuffle=False, ... batch_size=batch_size, ... collate_fn=data_collator, ... ) ``` Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to: ```py >>> model.compile(optimizer=optimizer) # No loss argument! ``` The last two things to setup before you start training is to compute the accuracy from the predictions, and provide a way to push your model to the Hub. Both are done by using [Keras callbacks](../main_classes/keras_callbacks). 
Pass your `compute_metrics` function to [`~transformers.KerasMetricCallback`]: ```py >>> from transformers.keras_callbacks import KerasMetricCallback >>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set) ``` Specify where to push your model and tokenizer in the [`~transformers.PushToHubCallback`]: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="my_awesome_model", ... tokenizer=tokenizer, ... ) ``` Then bundle your callbacks together: ```py >>> callbacks = [metric_callback, push_to_hub_callback] ``` Finally, you're ready to start training your model! Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) with your training and validation datasets, the number of epochs, and your callbacks to finetune the model: ```py >>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=2, callbacks=callbacks) ``` Once training is completed, your model is automatically uploaded to the Hub so everyone can use it! </tf> </frameworkcontent> <Tip> For a more in-depth example of how to finetune a model for multiple choice, take a look at the corresponding [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb) or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb). </Tip> ## Inference Great, now that you've finetuned a model, you can use it for inference! Come up with some text and two candidate answers: ```py >>> prompt = "France has a bread law, Le Décret Pain, with strict rules on what is allowed in a traditional baguette." >>> candidate1 = "The law does not apply to croissants and brioche." >>> candidate2 = "The law applies to baguettes." ``` <frameworkcontent> <pt> Tokenize each prompt and candidate answer pair and return PyTorch tensors. 
You should also create some `labels`: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_swag_model") >>> inputs = tokenizer([[prompt, candidate1], [prompt, candidate2]], return_tensors="pt", padding=True) >>> labels = torch.tensor(0).unsqueeze(0) ``` Pass your inputs and labels to the model and return the `logits`: ```py >>> from transformers import AutoModelForMultipleChoice >>> model = AutoModelForMultipleChoice.from_pretrained("my_awesome_swag_model") >>> outputs = model(**{k: v.unsqueeze(0) for k, v in inputs.items()}, labels=labels) >>> logits = outputs.logits ``` Get the class with the highest probability: ```py >>> predicted_class = logits.argmax().item() >>> predicted_class '0' ``` </pt> <tf> Tokenize each prompt and candidate answer pair and return TensorFlow tensors: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_swag_model") >>> inputs = tokenizer([[prompt, candidate1], [prompt, candidate2]], return_tensors="tf", padding=True) ``` Pass your inputs to the model and return the `logits`: ```py >>> from transformers import TFAutoModelForMultipleChoice >>> model = TFAutoModelForMultipleChoice.from_pretrained("my_awesome_swag_model") >>> inputs = {k: tf.expand_dims(v, 0) for k, v in inputs.items()} >>> outputs = model(inputs) >>> logits = outputs.logits ``` Get the class with the highest probability: ```py >>> predicted_class = int(tf.math.argmax(logits, axis=-1)[0]) >>> predicted_class '0' ``` </tf> </frameworkcontent>
transformers/docs/source/en/tasks/multiple_choice.md/0
{ "file_path": "transformers/docs/source/en/tasks/multiple_choice.md", "repo_id": "transformers", "token_count": 5490 }
306
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Testing Let's take a look at how 🤗 Transformers models are tested and how you can write new tests and improve the existing ones. There are 2 test suites in the repository: 1. `tests` -- tests for the general API 2. `examples` -- tests primarily for various applications that aren't part of the API ## How transformers are tested 1. Once a PR is submitted it gets tested with 9 CircleCi jobs. Every new commit to that PR gets retested. These jobs are defined in this [config file](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml), so that if needed you can reproduce the same environment on your machine. These CI jobs don't run `@slow` tests. 2. There are 3 jobs run by [github actions](https://github.com/huggingface/transformers/actions): - [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): checks whether torch hub integration works. - [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): runs fast tests on GPU only on commits on `main`. 
It only runs if a commit on `main` has updated the code in one of the following folders: `src`, `tests`, `.github` (to prevent running on added model cards, notebooks, etc.) - [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): runs normal and slow tests on GPU in `tests` and `examples`: ```bash RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/ ``` The results can be observed [here](https://github.com/huggingface/transformers/actions). ## Running tests ### Choosing which tests to run This document goes into many details of how tests can be run. If after reading everything, you need even more details you will find them [here](https://docs.pytest.org/en/latest/usage.html). Here are some most useful ways of running tests. Run all: ```console pytest ``` or: ```bash make test ``` Note that the latter is defined as: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` which tells pytest to: - run as many test processes as they are CPU cores (which could be too many if you don't have a ton of RAM!) - ensure that all tests from the same file will be run by the same test process - do not capture output - run in verbose mode ### Getting the list of all tests All tests of the test suite: ```bash pytest --collect-only -q ``` All tests of a given test file: ```bash pytest tests/test_optimization.py --collect-only -q ``` ### Run a specific test module To run an individual test module: ```bash pytest tests/utils/test_logging.py ``` ### Run specific tests Since unittest is used inside most of the tests, to run specific subtests you need to know the name of the unittest class containing those tests. 
For example, it could be: ```bash pytest tests/test_optimization.py::OptimizationTest::test_adam_w ``` Here: - `tests/test_optimization.py` - the file with tests - `OptimizationTest` - the name of the class - `test_adam_w` - the name of the specific test function If the file contains multiple classes, you can choose to run only tests of a given class. For example: ```bash pytest tests/test_optimization.py::OptimizationTest ``` will run all the tests inside that class. As mentioned earlier you can see what tests are contained inside the `OptimizationTest` class by running: ```bash pytest tests/test_optimization.py::OptimizationTest --collect-only -q ``` You can run tests by keyword expressions. To run only tests whose name contains `adam`: ```bash pytest -k adam tests/test_optimization.py ``` Logical `and` and `or` can be used to indicate whether all keywords should match or either. `not` can be used to negate. To run all tests except those whose name contains `adam`: ```bash pytest -k "not adam" tests/test_optimization.py ``` And you can combine the two patterns in one: ```bash pytest -k "ada and not adam" tests/test_optimization.py ``` For example to run both `test_adafactor` and `test_adam_w` you can use: ```bash pytest -k "test_adafactor or test_adam_w" tests/test_optimization.py ``` Note that we use `or` here, since we want either of the keywords to match to include both. If you want to include only tests that include both patterns, `and` is to be used: ```bash pytest -k "test and ada" tests/test_optimization.py ``` ### Run `accelerate` tests Sometimes you need to run `accelerate` tests on your models. For that you can just add `-m accelerate_tests` to your command, if let's say you want to run these tests on `OPT` run: ```bash RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py ``` ### Run documentation tests In order to test whether the documentation examples are correct, you should check that the `doctests` are passing. 
As an example, let's use [`WhisperModel.forward`'s docstring](https://github.com/huggingface/transformers/blob/1124d95dbb1a3512d3e80791d73d0f541d1d7e9f/src/transformers/models/whisper/modeling_whisper.py#L1591-L1609) ```python r""" Returns: Example: ```python >>> import torch >>> from transformers import WhisperModel, WhisperFeatureExtractor >>> from datasets import load_dataset >>> model = WhisperModel.from_pretrained("openai/whisper-base") >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") >>> input_features = inputs.input_features >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state >>> list(last_hidden_state.shape) [1, 2, 512] ```""" ``` Just run the following line to automatically test every docstring example in the desired file: ```bash pytest --doctest-modules <path_to_file_or_dir> ``` If the file has a markdown extention, you should add the `--doctest-glob="*.md"` argument. ### Run only modified tests You can run the tests related to the unstaged files or the current branch (according to Git) by using [pytest-picked](https://github.com/anapaulagomes/pytest-picked). This is a great way of quickly testing your changes didn't break anything, since it won't run the tests related to files you didn't touch. ```bash pip install pytest-picked ``` ```bash pytest --picked ``` All tests will be run from files and folders which are modified, but not yet committed. 
### Automatically rerun failed tests on source modification [pytest-xdist](https://github.com/pytest-dev/pytest-xdist) provides a very useful feature of detecting all failed tests, and then waiting for you to modify files and continuously re-rerun those failing tests until they pass while you fix them. So that you don't need to re start pytest after you made the fix. This is repeated until all tests pass after which again a full run is performed. ```bash pip install pytest-xdist ``` To enter the mode: `pytest -f` or `pytest --looponfail` File changes are detected by looking at `looponfailroots` root directories and all of their contents (recursively). If the default for this value does not work for you, you can change it in your project by setting a configuration option in `setup.cfg`: ```ini [tool:pytest] looponfailroots = transformers tests ``` or `pytest.ini`/``tox.ini`` files: ```ini [pytest] looponfailroots = transformers tests ``` This would lead to only looking for file changes in the respective directories, specified relatively to the ini-file’s directory. [pytest-watch](https://github.com/joeyespo/pytest-watch) is an alternative implementation of this functionality. ### Skip a test module If you want to run all test modules, except a few you can exclude them by giving an explicit list of tests to run. For example, to run all except `test_modeling_*.py` tests: ```bash pytest *ls -1 tests/*py | grep -v test_modeling* ``` ### Clearing state CI builds and when isolation is important (against speed), cache should be cleared: ```bash pytest --cache-clear tests ``` ### Running tests in parallel As mentioned earlier `make test` runs tests in parallel via `pytest-xdist` plugin (`-n X` argument, e.g. `-n 2` to run 2 parallel jobs). `pytest-xdist`'s `--dist=` option allows one to control how the tests are grouped. `--dist=loadfile` puts the tests located in one file onto the same process. 
Since the order of executed tests is different and unpredictable, if running the test suite with `pytest-xdist` produces failures (meaning we have some undetected coupled tests), use [pytest-replay](https://github.com/ESSS/pytest-replay) to replay the tests in the same order, which should help with then somehow reducing that failing sequence to a minimum. ### Test order and repetition It's good to repeat the tests several times, in sequence, randomly, or in sets, to detect any potential inter-dependency and state-related bugs (tear down). And the straightforward multiple repetition is just good to detect some problems that get uncovered by randomness of DL. #### Repeat tests - [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder): ```bash pip install pytest-flakefinder ``` And then run every test multiple times (50 by default): ```bash pytest --flake-finder --flake-runs=5 tests/test_failing_test.py ``` <Tip> This plugin doesn't work with `-n` flag from `pytest-xdist`. </Tip> <Tip> There is another plugin `pytest-repeat`, but it doesn't work with `unittest`. </Tip> #### Run tests in a random order ```bash pip install pytest-random-order ``` Important: the presence of `pytest-random-order` will automatically randomize tests, no configuration change or command line options is required. As explained earlier this allows detection of coupled tests - where one test's state affects the state of another. When `pytest-random-order` is installed it will print the random seed it used for that session, e.g: ```bash pytest tests [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` So that if the given particular sequence fails, you can reproduce it by adding that exact seed, e.g.: ```bash pytest --random-order-seed=573663 [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` It will only reproduce the exact order if you use the exact same list of tests (or no list at all). 
Once you start manually narrowing down the list you can no longer rely on the seed,
```bash pip install pytest-instafail ``` ```bash pytest --instafail ``` ### To GPU or not to GPU On a GPU-enabled setup, to test in CPU-only mode add `CUDA_VISIBLE_DEVICES=""`: ```bash CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py ``` or if you have multiple gpus, you can specify which one is to be used by `pytest`. For example, to use only the second gpu if you have gpus `0` and `1`, you can run: ```bash CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py ``` This is handy when you want to run different tasks on different GPUs. Some tests must be run on CPU-only, others on either CPU or GPU or TPU, yet others on multiple-GPUs. The following skip decorators are used to set the requirements of tests CPU/GPU/TPU-wise: - `require_torch` - this test will run only under torch - `require_torch_gpu` - as `require_torch` plus requires at least 1 GPU - `require_torch_multi_gpu` - as `require_torch` plus requires at least 2 GPUs - `require_torch_non_multi_gpu` - as `require_torch` plus requires 0 or 1 GPUs - `require_torch_up_to_2_gpus` - as `require_torch` plus requires 0 or 1 or 2 GPUs - `require_torch_xla` - as `require_torch` plus requires at least 1 TPU Let's depict the GPU requirements in the following table: | n gpus | decorator | |--------|--------------------------------| | `>= 0` | `@require_torch` | | `>= 1` | `@require_torch_gpu` | | `>= 2` | `@require_torch_multi_gpu` | | `< 2` | `@require_torch_non_multi_gpu` | | `< 3` | `@require_torch_up_to_2_gpus` | For example, here is a test that must be run only when there are 2 or more GPUs available and pytorch is installed: ```python no-style @require_torch_multi_gpu def test_example_with_multi_gpu(): ``` If a test requires `tensorflow` use the `require_tf` decorator. For example: ```python no-style @require_tf def test_tf_thing_with_tensorflow(): ``` These decorators can be stacked. 
For example, if a test is slow and requires at least one GPU under pytorch, here is how to set it up: ```python no-style @require_torch_gpu @slow def test_example_slow_on_gpu(): ``` Some decorators like `@parametrized` rewrite test names, therefore `@require_*` skip decorators have to be listed last for them to work correctly. Here is an example of the correct usage: ```python no-style @parameterized.expand(...) @require_torch_multi_gpu def test_integration_foo(): ``` This order problem doesn't exist with `@pytest.mark.parametrize`, you can put it first or last and it will still work. But it only works with non-unittests. Inside tests: - How many GPUs are available: ```python from transformers.testing_utils import get_gpu_count n_gpu = get_gpu_count() # works with torch and tf ``` ### Testing with a specific PyTorch backend or device To run the test suite on a specific torch device add `TRANSFORMERS_TEST_DEVICE="$device"` where `$device` is the target backend. For example, to test on CPU only: ```bash TRANSFORMERS_TEST_DEVICE="cpu" pytest tests/utils/test_logging.py ``` This variable is useful for testing custom or less common PyTorch backends such as `mps`, `xpu` or `npu`. It can also be used to achieve the same effect as `CUDA_VISIBLE_DEVICES` by targeting specific GPUs or testing in CPU-only mode. Certain devices will require an additional import after importing `torch` for the first time. This can be specified using the environment variable `TRANSFORMERS_TEST_BACKEND`: ```bash TRANSFORMERS_TEST_BACKEND="torch_npu" pytest tests/utils/test_logging.py ``` Alternative backends may also require the replacement of device-specific functions. For example `torch.cuda.manual_seed` may need to be replaced with a device-specific seed setter like `torch.npu.manual_seed` or `torch.xpu.manual_seed` to correctly set a random seed on the device. 
To specify a new backend with backend-specific device functions when running the test suite, create a Python device specification file `spec.py` in the format: ```python import torch import torch_npu # for xpu, replace it with `import intel_extension_for_pytorch` # !! Further additional imports can be added here !! # Specify the device name (eg. 'cuda', 'cpu', 'npu', 'xpu', 'mps') DEVICE_NAME = 'npu' # Specify device-specific backends to dispatch to. # If not specified, will fallback to 'default' in 'testing_utils.py` MANUAL_SEED_FN = torch.npu.manual_seed EMPTY_CACHE_FN = torch.npu.empty_cache DEVICE_COUNT_FN = torch.npu.device_count ``` This format also allows for specification of any additional imports required. To use this file to replace equivalent methods in the test suite, set the environment variable `TRANSFORMERS_TEST_DEVICE_SPEC` to the path of the spec file, e.g. `TRANSFORMERS_TEST_DEVICE_SPEC=spec.py`. Currently, only `MANUAL_SEED_FN`, `EMPTY_CACHE_FN` and `DEVICE_COUNT_FN` are supported for device-specific dispatch. ### Distributed training `pytest` can't deal with distributed training directly. If this is attempted - the sub-processes don't do the right thing and end up thinking they are `pytest` and start running the test suite in loops. It works, however, if one spawns a normal process that then spawns off multiple workers and manages the IO pipes. Here are some tests that use it: - [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py) - [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py) To jump right into the execution point, search for the `execute_subprocess_async` call in those tests. 
You will need at least 2 GPUs to see these tests in action: ```bash CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py ``` ### Output capture During test execution any output sent to `stdout` and `stderr` is captured. If a test or a setup method fails, its according captured output will usually be shown along with the failure traceback. To disable output capturing and to get the `stdout` and `stderr` normally, use `-s` or `--capture=no`: ```bash pytest -s tests/utils/test_logging.py ``` To send test results to JUnit format output: ```bash pytest tests --junitxml=result.xml ``` ### Color control To have no color (e.g., yellow on white background is not readable): ```bash pytest --color=no tests/utils/test_logging.py ``` ### Sending test report to online pastebin service Creating a URL for each test failure: ```bash pytest --pastebin=failed tests/utils/test_logging.py ``` This will submit test run information to a remote Paste service and provide a URL for each failure. You may select tests as usual or add for example -x if you only want to send one particular failure. Creating a URL for a whole test session log: ```bash pytest --pastebin=all tests/utils/test_logging.py ``` ## Writing tests 🤗 transformers tests are based on `unittest`, but run by `pytest`, so most of the time features from both systems can be used. You can read [here](https://docs.pytest.org/en/stable/unittest.html) which features are supported, but the important thing to remember is that most `pytest` fixtures don't work. Neither parametrization, but we use the module `parameterized` that works in a similar way. ### Parametrization Often, there is a need to run the same test multiple times, but with different arguments. It could be done from within the test, but then there is no way of running that test for just one set of arguments. 
```python # test_this1.py import unittest from parameterized import parameterized class TestMathUnitTest(unittest.TestCase): @parameterized.expand( [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ] ) def test_floor(self, name, input, expected): assert_equal(math.floor(input), expected) ``` Now, by default this test will be run 3 times, each time with the last 3 arguments of `test_floor` being assigned the corresponding arguments in the parameter list. and you could run just the `negative` and `integer` sets of params with: ```bash pytest -k "negative and integer" tests/test_mytest.py ``` or all but `negative` sub-tests, with: ```bash pytest -k "not negative" tests/test_mytest.py ``` Besides using the `-k` filter that was just mentioned, you can find out the exact name of each sub-test and run any or all of them using their exact names. ```bash pytest test_this1.py --collect-only -q ``` and it will list: ```bash test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction ``` So now you can run just 2 specific sub-tests: ```bash pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer ``` The module [parameterized](https://pypi.org/project/parameterized/) which is already in the developer dependencies of `transformers` works for both: `unittests` and `pytest` tests. If, however, the test is not a `unittest`, you may use `pytest.mark.parametrize` (or you may see it being used in some existing tests, mostly under `examples`). 
Here is the same example, this time using `pytest`'s `parametrize` marker: ```python # test_this2.py import pytest @pytest.mark.parametrize( "name, input, expected", [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ], ) def test_floor(name, input, expected): assert_equal(math.floor(input), expected) ``` Same as with `parameterized`, with `pytest.mark.parametrize` you can have a fine control over which sub-tests are run, if the `-k` filter doesn't do the job. Except, this parametrization function creates a slightly different set of names for the sub-tests. Here is what they look like: ```bash pytest test_this2.py --collect-only -q ``` and it will list: ```bash test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1] ``` So now you can run just the specific test: ```bash pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0] ``` as in the previous example. ### Files and directories In tests often we need to know where things are relative to the current test file, and it's not trivial since the test could be invoked from more than one directory or could reside in sub-directories with different depths. A helper class `transformers.test_utils.TestCasePlus` solves this problem by sorting out all the basic paths and provides easy accessors to them: - `pathlib` objects (all fully resolved): - `test_file_path` - the current test file path, i.e. `__file__` - `test_file_dir` - the directory containing the current test file - `tests_dir` - the directory of the `tests` test suite - `examples_dir` - the directory of the `examples` test suite - `repo_root_dir` - the directory of the repository - `src_dir` - the directory of `src` (i.e. 
where the `transformers` sub-dir resides) - stringified paths---same as above but these return paths as strings, rather than `pathlib` objects: - `test_file_path_str` - `test_file_dir_str` - `tests_dir_str` - `examples_dir_str` - `repo_root_dir_str` - `src_dir_str` To start using those all you need is to make sure that the test resides in a subclass of `transformers.test_utils.TestCasePlus`. For example: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_local_locations(self): data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro" ``` If you don't need to manipulate paths via `pathlib` or you just need a path as a string, you can always invoke `str()` on the `pathlib` object or use the accessors ending with `_str`. For example: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_stringified_locations(self): examples_dir = self.examples_dir_str ``` ### Temporary files and directories Using unique temporary files and directories is essential for parallel test running, so that the tests won't overwrite each other's data. Also we want to get the temporary files and directories removed at the end of each test that created them. Therefore, using packages like `tempfile`, which address these needs is essential. However, when debugging tests, you need to be able to see what goes into the temporary file or directory and you want to know its exact path and not have it randomized on every test re-run. A helper class `transformers.test_utils.TestCasePlus` is best used for such purposes. It's a sub-class of `unittest.TestCase`, so we can easily inherit from it in the test modules. 
Here is an example of its usage: ```python from transformers.testing_utils import TestCasePlus class ExamplesTests(TestCasePlus): def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` This code creates a unique temporary directory, and sets `tmp_dir` to its location. - Create a unique temporary dir: ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` `tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the test. - Create a temporary dir of my choice, ensure it's empty before the test starts and don't empty it after the test. ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir("./xxx") ``` This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests didn't leave any data in there. - You can override the default behavior by directly overriding the `before` and `after` args, leading to one of the following behaviors: - `before=True`: the temporary dir will always be cleared at the beginning of the test. - `before=False`: if the temporary dir already existed, any existing files will remain there. - `after=True`: the temporary dir will always be deleted at the end of the test. - `after=False`: the temporary dir will always be left intact at the end of the test. <Tip> In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem will get nuked. i.e. please always pass paths that start with `./`. </Tip> <Tip> Each test can register multiple temporary directories and they all will get auto-removed, unless requested otherwise. </Tip> ### Temporary sys.path override If you need to temporary override `sys.path` to import from another test for example, you can use the `ExtendSysPath` context manager. 
Example: ```python import os from transformers.testing_utils import ExtendSysPath bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/.."): from test_trainer import TrainerIntegrationCommon # noqa ``` ### Skipping tests This is useful when a bug is found and a new test is written, yet the bug is not fixed yet. In order to be able to commit it to the main repository we need to make sure it's skipped during `make test`. Methods: - A **skip** means that you expect your test to pass only if some conditions are met, otherwise pytest should skip running the test altogether. Common examples are skipping windows-only tests on non-windows platforms, or skipping tests that depend on an external resource which is not available at the moment (for example a database). - An **xfail** means that you expect a test to fail for some reason. A common example is a test for a feature not yet implemented, or a bug not yet fixed. When a test passes despite being expected to fail (marked with pytest.mark.xfail), it’s an xpass and will be reported in the test summary. One of the important differences between the two is that `skip` doesn't run the test, and `xfail` does. So if the code that's buggy causes some bad state that will affect other tests, do not use `xfail`. 
#### Implementation - Here is how to skip whole test unconditionally: ```python no-style @unittest.skip(reason="this bug needs to be fixed") def test_feature_x(): ``` or via pytest: ```python no-style @pytest.mark.skip(reason="this bug needs to be fixed") ``` or the `xfail` way: ```python no-style @pytest.mark.xfail def test_feature_x(): ``` Here's how to skip a test based on internal checks within the test: ```python def test_feature_x(): if not has_something(): pytest.skip("unsupported configuration") ``` or the whole module: ```python import pytest if not pytest.config.getoption("--custom-flag"): pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True) ``` or the `xfail` way: ```python def test_feature_x(): pytest.xfail("expected to fail until bug XYZ is fixed") ``` - Here is how to skip all tests in a module if some import is missing: ```python docutils = pytest.importorskip("docutils", minversion="0.3") ``` - Skip a test based on a condition: ```python no-style @pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher") def test_feature_x(): ``` or: ```python no-style @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_feature_x(): ``` or skip the whole module: ```python no-style @pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows") class TestClass(): def test_feature_x(self): ``` More details, example and ways are [here](https://docs.pytest.org/en/latest/skipping.html). ### Slow tests The library of tests is ever-growing, and some of the tests take minutes to run, therefore we can't afford waiting for an hour for the test suite to complete on CI. 
Therefore, with some exceptions for essential tests, slow tests should be marked as in the example below: ```python no-style from transformers.testing_utils import slow @slow def test_integration_foo(): ``` Once a test is marked as `@slow`, to run such tests set `RUN_SLOW=1` env var, e.g.: ```bash RUN_SLOW=1 pytest tests ``` Some decorators like `@parameterized` rewrite test names, therefore `@slow` and the rest of the skip decorators `@require_*` have to be listed last for them to work correctly. Here is an example of the correct usage: ```python no-style @parameterized.expand(...) @slow def test_integration_foo(): ``` As explained at the beginning of this document, slow tests get to run on a scheduled basis, rather than in PRs CI checks. So it's possible that some problems will be missed during a PR submission and get merged. Such problems will get caught during the next scheduled CI job. But it also means that it's important to run the slow tests on your machine before submitting the PR. Here is a rough decision making mechanism for choosing which tests should be marked as slow: If the test is focused on one of the library's internal components (e.g., modeling files, tokenization files, pipelines), then we should run that test in the non-slow test suite. If it's focused on another aspect of the library, such as the documentation or the examples, then we should run these tests in the slow test suite. And then, to refine this approach we should have exceptions: - All tests that need to download a heavy set of weights or a dataset that is larger than ~50MB (e.g., model or tokenizer integration tests, pipeline integration tests) should be set to slow. If you're adding a new model, you should create and upload to the hub a tiny version of it (with random weights) for integration tests. This is discussed in the following paragraphs. - All tests that need to do a training not specifically optimized to be fast should be set to slow. 
- We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to `@slow`. Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked as `@slow`. - If a test completes under 1 second on CI (including downloads if any) then it should be a normal test regardless. Collectively, all the non-slow tests need to cover entirely the different internals, while remaining fast. For example, a significant coverage can be achieved by testing with specially created tiny models with random weights. Such models have the very minimal number of layers (e.g., 2), vocab size (e.g., 1000), etc. Then the `@slow` tests can use large slow models to do qualitative testing. To see the use of these simply look for *tiny* models with: ```bash grep tiny tests examples ``` Here is an example of a [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py) that created the tiny model [stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de). You can easily adjust it to your specific model's architecture. It's easy to measure the run-time incorrectly if for example there is an overhead of downloading a huge model, but if you test it locally the downloaded files would be cached and thus the download time not measured. Hence check the execution speed report in CI logs instead (the output of `pytest --durations=0 tests`). That report is also useful to find slow outliers that aren't marked as such, or which need to be re-written to be fast. If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest tests. ### Testing the stdout/stderr output In order to test functions that write to `stdout` and/or `stderr`, the test can access those streams using the `pytest`'s [capsys system](https://docs.pytest.org/en/latest/capture.html). 
Here is how this is accomplished: ```python import sys def print_to_stdout(s): print(s) def print_to_stderr(s): sys.stderr.write(s) def test_result_and_stdout(capsys): msg = "Hello" print_to_stdout(msg) print_to_stderr(msg) out, err = capsys.readouterr() # consume the captured output streams # optional: if you want to replay the consumed streams: sys.stdout.write(out) sys.stderr.write(err) # test: assert msg in out assert msg in err ``` And, of course, most of the time, `stderr` will come as a part of an exception, so try/except has to be used in such a case: ```python def raise_exception(msg): raise ValueError(msg) def test_something_exception(): msg = "Not a good value" error = "" try: raise_exception(msg) except Exception as e: error = str(e) assert msg in error, f"{msg} is in the exception:\n{error}" ``` Another approach to capturing stdout is via `contextlib.redirect_stdout`: ```python from io import StringIO from contextlib import redirect_stdout def print_to_stdout(s): print(s) def test_result_and_stdout(): msg = "Hello" buffer = StringIO() with redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() # optional: if you want to replay the consumed streams: sys.stdout.write(out) # test: assert msg in out ``` An important potential issue with capturing stdout is that it may contain `\r` characters that in normal `print` reset everything that has been printed so far. There is no problem with `pytest`, but with `pytest -s` these characters get included in the buffer, so to be able to have the test run with and without `-s`, you have to make an extra cleanup to the captured output, using `re.sub(r'~.*\r', '', buf, 0, re.M)`. 
But, then we have a helper context manager wrapper to automatically take care of it all, regardless of whether it has some `\r`'s in it or not, so it's a simple: ```python from transformers.testing_utils import CaptureStdout with CaptureStdout() as cs: function_that_writes_to_stdout() print(cs.out) ``` Here is a full test example: ```python from transformers.testing_utils import CaptureStdout msg = "Secret message\r" final = "Hello World" with CaptureStdout() as cs: print(msg + final) assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}" ``` If you'd like to capture `stderr` use the `CaptureStderr` class instead: ```python from transformers.testing_utils import CaptureStderr with CaptureStderr() as cs: function_that_writes_to_stderr() print(cs.err) ``` If you need to capture both streams at once, use the parent `CaptureStd` class: ```python from transformers.testing_utils import CaptureStd with CaptureStd() as cs: function_that_writes_to_stdout_and_stderr() print(cs.err, cs.out) ``` Also, to aid debugging test issues, by default these context managers automatically replay the captured streams on exit from the context. 
### Capturing logger stream If you need to validate the output of a logger, you can use `CaptureLogger`: ```python from transformers import logging from transformers.testing_utils import CaptureLogger msg = "Testing 1, 2, 3" logging.set_verbosity_info() logger = logging.get_logger("transformers.models.bart.tokenization_bart") with CaptureLogger(logger) as cl: logger.info(msg) assert cl.out, msg + "\n" ``` ### Testing with environment variables If you want to test the impact of environment variables for a specific test you can use a helper decorator `transformers.testing_utils.mockenv` ```python from transformers.testing_utils import mockenv class HfArgumentParserTest(unittest.TestCase): @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) ``` At times an external program needs to be called, which requires setting `PYTHONPATH` in `os.environ` to include multiple local paths. A helper class `transformers.test_utils.TestCasePlus` comes to help: ```python from transformers.testing_utils import TestCasePlus class EnvExampleTest(TestCasePlus): def test_external_prog(self): env = self.get_env() # now call the external program, passing `env` to it ``` Depending on whether the test file was under the `tests` test suite or `examples` it'll correctly set up `env[PYTHONPATH]` to include one of these two directories, and also the `src` directory to ensure the testing is done against the current repo, and finally with whatever `env[PYTHONPATH]` was already set to before the test was called if anything. This helper method creates a copy of the `os.environ` object, so the original remains intact. ### Getting reproducible results In some situations you may want to remove randomness for your tests. 
To get identical reproducible results set, you will need to fix the seed: ```python seed = 42 # python RNG import random random.seed(seed) # pytorch RNGs import torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = True if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) # numpy RNG import numpy as np np.random.seed(seed) # tf RNG tf.random.set_seed(seed) ``` ### Debugging tests To start a debugger at the point of the warning, do this: ```bash pytest tests/utils/test_logging.py -W error::UserWarning --pdb ``` ## Working with github actions workflows To trigger a self-push workflow CI job, you must: 1. Create a new branch on `transformers` origin (not a fork!). 2. The branch name has to start with either `ci_` or `ci-` (`main` triggers it too, but we can't do PRs on `main`). It also gets triggered only for specific paths - you can find the up-to-date definition in case it changed since this document has been written [here](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) under *push:* 3. Create a PR from this branch. 4. Then you can see the job appear [here](https://github.com/huggingface/transformers/actions/workflows/self-push.yml). It may not run right away if there is a backlog. ## Testing Experimental CI Features Testing CI features can be potentially problematic as it can interfere with the normal CI functioning. Therefore if a new CI feature is to be added, it should be done as following. 1. Create a new dedicated job that tests what needs to be tested 2. The new job must always succeed so that it gives us a green ✓ (details below). 3. Let it run for some days to see that a variety of different PR types get to run on it (user fork branches, non-forked branches, branches originating from github.com UI direct file edit, various forced pushes, etc. - there are so many) while monitoring the experimental job's logs (not the overall job green as it's purposefully always green) 4. 
When it's clear that everything is solid, then merge the new changes into existing jobs. That way experiments on CI functionality itself won't interfere with the normal workflow. Now how can we make the job always succeed while the new CI feature is being developed? Some CIs, like TravisCI support ignore-step-failure and will report the overall job as successful, but CircleCI and Github Actions as of this writing don't support that. So the following workaround can be used: 1. `set +euo pipefail` at the beginning of the run command to suppress most potential failures in the bash script. 2. the last command must be a success: `echo "done"` or just `true` will do Here is an example: ```yaml - run: name: run CI experiment command: | set +euo pipefail echo "setting run-all-despite-any-errors-mode" this_command_will_fail echo "but bash continues to run" # emulate another failure false # but the last command must be a success echo "during experiment do not remove: reporting success to CI, even if there were failures" ``` For simple commands you could also do: ```bash cmd_that_may_fail || true ``` Of course, once satisfied with the results, integrate the experimental step or job with the rest of the normal jobs, while removing `set +euo pipefail` or any other things you may have added to ensure that the experimental job doesn't interfere with the normal CI functioning. This whole process would have been much easier if we only could set something like `allow-failure` for the experimental step, and let it fail without impacting the overall status of PRs. But as mentioned earlier CircleCI and Github Actions don't support it at the moment. You can vote for this feature and see where it is at these CI-specific threads: - [Github Actions:](https://github.com/actions/toolkit/issues/399) - [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344) ## DeepSpeed integration For a PR that involves the DeepSpeed integration, keep in mind our CircleCI PR CI setup doesn't have GPUs. 
Tests requiring GPUs are run on a different CI nightly. This means if you get a passing CI report in your PR, it doesn’t mean the DeepSpeed tests pass. To run DeepSpeed tests: ```bash RUN_SLOW=1 pytest tests/deepspeed/test_deepspeed.py ``` Any changes to the modeling or PyTorch examples code requires running the model zoo tests as well. ```bash RUN_SLOW=1 pytest tests/deepspeed ```
transformers/docs/source/en/testing.md/0
{ "file_path": "transformers/docs/source/en/testing.md", "repo_id": "transformers", "token_count": 13508 }
307
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines para inferencia Un [`pipeline`] simplifica el uso de cualquier modelo del [Hub](https://huggingface.co/models) para la inferencia en una variedad de tareas como la generación de texto, la segmentación de imágenes y la clasificación de audio. Incluso si no tienes experiencia con una modalidad específica o no comprendes el código que alimenta los modelos, ¡aún puedes usarlos con el [`pipeline`]! Este tutorial te enseñará a: * Utilizar un [`pipeline`] para inferencia. * Utilizar un tokenizador o modelo específico. * Utilizar un [`pipeline`] para tareas de audio y visión. <Tip> Echa un vistazo a la documentación de [`pipeline`] para obtener una lista completa de tareas admitidas. </Tip> ## Uso del pipeline Si bien cada tarea tiene un [`pipeline`] asociado, es más sencillo usar la abstracción general [`pipeline`] que contiene todos los pipelines de tareas específicas. El [`pipeline`] carga automáticamente un modelo predeterminado y un tokenizador con capacidad de inferencia para tu tarea. Veamos el ejemplo de usar un [`pipeline`] para reconocimiento automático del habla (ASR), o voz a texto. 1. 
Comienza creando un [`pipeline`] y específica una tarea de inferencia: ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition") ``` 2. Pasa tu entrada a la [`pipeline`]. En el caso del reconocimiento del habla, esto es un archivo de entrada de audio: ```py >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'} ``` ¿No es el resultado que tenías en mente? Echa un vistazo a algunos de los [modelos de reconocimiento automático del habla más descargados](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending) en el Hub para ver si puedes obtener una mejor transcripción. Intentemos con el modelo [Whisper large-v2](https://huggingface.co/openai/whisper-large) de OpenAI. Whisper se lanzó 2 años después que Wav2Vec2, y se entrenó con cerca de 10 veces más datos. Como tal, supera a Wav2Vec2 en la mayoría de las pruebas downstream. También tiene el beneficio adicional de predecir puntuación y mayúsculas, ninguno de los cuales es posible con Wav2Vec2. Vamos a probarlo aquí para ver cómo se desempeña: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` ¡Ahora este resultado parece más preciso! Para una comparación detallada de Wav2Vec2 vs Whisper, consulta el [Curso de Transformers de Audio](https://huggingface.co/learn/audio-course/chapter5/asr_models). Realmente te animamos a que eches un vistazo al Hub para modelos en diferentes idiomas, modelos especializados en tu campo, y más. Puedes comparar directamente los resultados de los modelos desde tu navegador en el Hub para ver si se adapta o maneja casos de borde mejor que otros. 
Y si no encuentras un modelo para tu caso de uso, siempre puedes empezar a [entrenar](training) el tuyo propio. Si tienes varias entradas, puedes pasar tu entrada como una lista: ```py transcriber( [ "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac", "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", ] ) ``` Los pipelines son ideales para la experimentación, ya que cambiar de un modelo a otro es trivial; sin embargo, hay algunas formas de optimizarlas para cargas de trabajo más grandes que la experimentación. Consulta las siguientes guías que profundizan en iterar sobre conjuntos de datos completos o utilizar pipelines en un servidor web: de la documentación: * [Uso de pipelines en un conjunto de datos](#uso-de-pipelines-en-un-conjunto-de-datos) * [Uso de pipelines para un servidor web](./pipeline_webserver) ## Parámetros [`pipeline`] admite muchos parámetros; algunos son específicos de la tarea y algunos son generales para todas las pipelines. En general, puedes especificar parámetros en cualquier lugar que desees: ```py transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1) out = transcriber(...) # This will use `my_parameter=1`. out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`. out = transcriber(...) # This will go back to using `my_parameter=1`. ``` Vamos a echar un vistazo a tres importantes: ### Device Si usas `device=n`, el pipeline automáticamente coloca el modelo en el dispositivo especificado. Esto funcionará independientemente de si estás utilizando PyTorch o Tensorflow. ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0) ``` Si el modelo es demasiado grande para una sola GPU y estás utilizando PyTorch, puedes establecer `device_map="auto"` para determinar automáticamente cómo cargar y almacenar los pesos del modelo. 
Utilizar el argumento `device_map` requiere el paquete 🤗 [Accelerate](https://huggingface.co/docs/accelerate): ```bash pip install --upgrade accelerate ``` El siguiente código carga y almacena automáticamente los pesos del modelo en varios dispositivos: ```py transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto") ``` Tenga en cuenta que si se pasa `device_map="auto"`, no es necesario agregar el argumento `device=device` al instanciar tu `pipeline`, ¡ya que podrías encontrar algún comportamiento inesperado! ### Batch size Por defecto, los pipelines no realizarán inferencia por lotes por razones explicadas en detalle [aquí](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching). La razón es que la agrupación en lotes no es necesariamente más rápida y, de hecho, puede ser bastante más lenta en algunos casos. Pero si funciona en tu caso de uso, puedes utilizar: ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2) audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)] texts = transcriber(audio_filenames) ``` Esto ejecuta el pipeline en los 4 archivos de audio proporcionados, pero los pasará en lotes de a 2 al modelo (que está en una GPU, donde la agrupación en lotes es más probable que ayude) sin requerir ningún código adicional de tu parte. La salida siempre debería coincidir con lo que habrías recibido sin agrupación en lotes. Solo se pretende como una forma de ayudarte a obtener más velocidad de una pipeline. Los pipelines también pueden aliviar algunas de las complejidades de la agrupación en lotes porque, para algunos pipelines, un solo elemento (como un archivo de audio largo) necesita ser dividido en varias partes para ser procesado por un modelo. El pipeline realiza esta [*agrupación en lotes de fragmentos*](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-chunk-batching) por ti. 
### Task specific parameters Todas las tareas proporcionan parámetros específicos de la tarea que permiten flexibilidad adicional y opciones para ayudarte a completar tu trabajo. Por ejemplo, el método [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] tiene un parámetro `return_timestamps` que suena prometedor para subtítulos de videos: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]} ``` Como puedes ver, el modelo infirió el texto y también salió **cuándo** se pronunciaron las distintas oraciones. Hay muchos parámetros disponibles para cada tarea, así que echa un vistazo a la referencia de la API de cada tarea para ver qué puedes ajustar. Por ejemplo, el [`~transformers.AutomaticSpeechRecognitionPipeline`] tiene un parámetro `chunk_length_s` que es útil para trabajar con archivos de audio realmente largos (por ejemplo, subtítulos de películas completas o videos de una hora de duración) que un modelo típicamente no puede manejar solo: ```python >>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30) >>> transcriber("https://huggingface.co/datasets/reach-vb/random-audios/resolve/main/ted_60.wav") {'text': " So in college, I was a government major, which means I had to write a lot of papers. Now, when a normal student writes a paper, they might spread the work out a little like this. So, you know. You get started maybe a little slowly, but you get enough done in the first week that with some heavier days later on, everything gets done and things stay civil. And I would want to do that like that. 
That would be the plan. I would have it all ready to go, but then actually the paper would come along, and then I would kind of do this. And that would happen every single paper. But then came my 90-page senior thesis, a paper you're supposed to spend a year on. I knew for a paper like that, my normal workflow was not an option, it was way too big a project. So I planned things out and I decided I kind of had to go something like this. This is how the year would go. So I'd start off light and I'd bump it up"} ``` ¡Si no puedes encontrar un parámetro que te ayude, no dudes en [solicitarlo](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)! ## Uso de pipelines en un conjunto de datos Los pipeline también puede ejecutar inferencia en un conjunto de datos grande. La forma más fácil que recomendamos para hacer esto es utilizando un iterador: ```py def data(): for i in range(1000): yield f"My example {i}" pipe = pipeline(model="openai-community/gpt2", device=0) generated_characters = 0 for out in pipe(data()): generated_characters += len(out[0]["generated_text"]) ``` El iterador `data()` produce cada resultado, y el pipeline automáticamente reconoce que la entrada es iterable y comenzará a buscar los datos mientras continúa procesándolos en la GPU (dicho proceso utiliza [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader)). Esto es importante porque no tienes que asignar memoria para todo el conjunto de datos y puedes alimentar la GPU lo más rápido posible. Dado que la agrupación en lotes podría acelerar las cosas, puede ser útil intentar ajustar el parámetro `batch_size` aquí. La forma más sencilla de iterar sobre un conjunto de datos es cargandolo desde 🤗 [Datasets](https://github.com/huggingface/datasets/): ```py # KeyDataset is a util that will just output the item we're interested in. 
from transformers.pipelines.pt_utils import KeyDataset from datasets import load_dataset pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]") for out in pipe(KeyDataset(dataset, "audio")): print(out) ``` ## Uso de pipelines para un servidor web <Tip> Crear un motor de inferencia es un tema complejo que merece su propia página. </Tip> [Link](./pipeline_webserver) ## Pipeline de visión Usar un [`pipeline`] para tareas de visión es prácticamente idéntico. Especifica tu tarea y pasa tu imagen al clasificador. La imagen puede ser un enlace, una ruta local o una imagen codificada en base64. Por ejemplo, ¿qué especie de gato se muestra a continuación? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(model="google/vit-base-patch16-224") >>> preds = vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ``` ## Pipeline de texto Usar un [`pipeline`] para tareas de PLN es prácticamente idéntico. ```py >>> from transformers import pipeline >>> # This model is a `zero-shot-classification` model. >>> # It will classify text, except you are free to choose any label you might imagine >>> classifier = pipeline(model="facebook/bart-large-mnli") >>> classifier( ... 
"I have a problem with my iphone that needs to be resolved asap!!", ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], ... ) {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} ``` ## Pipeline multimodal [`pipeline`] admite más de una modalidad. Por ejemplo, una tarea de respuesta a preguntas visuales (VQA) combina texto e imagen. No dudes en usar cualquier enlace de imagen que desees y una pregunta que quieras hacer sobre la imagen. La imagen puede ser una URL o una ruta local a la imagen. Por ejemplo, si usas esta [imagen de factura](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png): ```py >>> from transformers import pipeline >>> vqa = pipeline(model="impira/layoutlm-document-qa") >>> output = vqa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... ) >>> output[0]["score"] = round(output[0]["score"], 3) >>> output [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` <Tip> Para ejecutar el ejemplo anterior, debe tener instalado [`pytesseract`](https://pypi.org/project/pytesseract/) además de 🤗 Transformers: ```bash sudo apt install -y tesseract-ocr pip install pytesseract ``` </Tip> ## Uso de `pipeline` en modelos grandes con 🤗 `accelerate`: ¡Puedes ejecutar fácilmente `pipeline` en modelos grandes utilizando 🤗 `accelerate`! Primero asegúrate de haber instalado `accelerate` con `pip install accelerate`. ¡Luego carga tu modelo utilizando `device_map="auto"`! Utilizaremos `facebook/opt-1.3b` para nuestro ejemplo. 
```py # pip install accelerate import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto") output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` También puedes pasar modelos cargados de 8 bits si instalas `bitsandbytes` y agregas el argumento `load_in_8bit=True` ```py # pip install accelerate bitsandbytes import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True}) output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` Nota que puedes reemplazar el punto de control con cualquier modelo de Hugging Face que admita la carga de modelos grandes, como BLOOM. ## Crear demos web desde pipelines con `gradio` Los pipelines están automáticamente soportados en [Gradio](https://github.com/gradio-app/gradio/), una biblioteca que hace que crear aplicaciones de aprendizaje automático hermosas y fáciles de usar en la web sea un proceso sencillo. Primero, asegúrate de tener Gradio instalado: ``` pip install gradio ``` Luego, puedes crear una demo web alrededor de una pipeline de clasificación de imágenes (o cualquier otra pipeline) en una sola línea de código llamando a la función `Interface.from_pipeline` de Gradio para lanzar la pipeline. Esto crea una interfaz intuitiva *drag-and-drop* en tu navegador: ```py from transformers import pipeline import gradio as gr pipe = pipeline("image-classification", model="google/vit-base-patch16-224") gr.Interface.from_pipeline(pipe).launch() ``` ![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/panda-classification.png) De forma predeterminada, la demo web se ejecuta en un servidor local. Si deseas compartirlo con otros, puedes generar un enlace público temporal estableciendo `share=True` en `launch()`. 
También puedes hospedar tu demo en [Hugging Face Spaces](https://huggingface.co/spaces) para un enlace permanente.
transformers/docs/source/es/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/es/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 6257 }
308
# docstyle-ignore INSTALL_CONTENT = """ # Installazione di Transformers ! pip install transformers datasets evaluate accelerate # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] black_avoid_patterns = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
transformers/docs/source/it/_config.py/0
{ "file_path": "transformers/docs/source/it/_config.py", "repo_id": "transformers", "token_count": 190 }
309
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Modelli multilingue per l'inferenza [[open-in-colab]] Ci sono diversi modelli multilingue in 🤗 Transformers, e il loro utilizzo per l'inferenza differisce da quello dei modelli monolingua. Non *tutti* gli utilizzi dei modelli multilingue sono però diversi. Alcuni modelli, come [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased), possono essere usati come un modello monolingua. Questa guida ti mostrerà come utilizzare modelli multilingue che utilizzano un modo diverso per fare l'inferenza. ## XLM XLM ha dieci diversi checkpoint, di cui solo uno è monolingua. I nove checkpoint rimanenti possono essere suddivisi in due categorie: i checkpoint che utilizzano i language embeddings e quelli che non li utilizzano. 
### XLM con language embeddings I seguenti modelli XLM utilizzano gli embeddings linguistici per specificare la lingua utilizzata per l'inferenza: - `FacebookAI/xlm-mlm-ende-1024` (Modellazione mascherata del linguaggio (Masked language modeling, in inglese), Inglese-Tedesco) - `FacebookAI/xlm-mlm-enfr-1024` (Modellazione mascherata del linguaggio, Inglese-Francese) - `FacebookAI/xlm-mlm-enro-1024` (Modellazione mascherata del linguaggio, Inglese-Rumeno) - `FacebookAI/xlm-mlm-xnli15-1024` (Modellazione mascherata del linguaggio, lingue XNLI) - `FacebookAI/xlm-mlm-tlm-xnli15-1024` (Modellazione mascherata del linguaggio + traduzione, lingue XNLI) - `FacebookAI/xlm-clm-enfr-1024` (Modellazione causale del linguaggio, Inglese-Francese) - `FacebookAI/xlm-clm-ende-1024` (Modellazione causale del linguaggio, Inglese-Tedesco) Gli embeddings linguistici sono rappresentati come un tensore delle stesse dimensioni dell' `input_ids` passato al modello. I valori in questi tensori dipendono dal linguaggio usato e sono identificati dagli attributi `lang2id` e `id2lang` del tokenizer. In questo esempio, carica il checkpoint `FacebookAI/xlm-clm-enfr-1024` (Modellazione causale del linguaggio, Inglese-Francese): ```py >>> import torch >>> from transformers import XLMTokenizer, XLMWithLMHeadModel >>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024") >>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024") ``` L'attributo `lang2id` del tokenizer mostra il linguaggio del modello e il suo ids: ```py >>> print(tokenizer.lang2id) {'en': 0, 'fr': 1} ``` Poi, crea un esempio di input: ```py >>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1 ``` Imposta l'id del linguaggio a `"en"` e usalo per definire il language embedding. Il language embedding è un tensore riempito con `0` perché questo è il language id per l'inglese. Questo tensore dovrebbe avere la stessa dimensione di `input_ids`. 
```py >>> language_id = tokenizer.lang2id["en"] # 0 >>> langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0]) >>> # We reshape it to be of size (batch_size, sequence_length) >>> langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1) ``` Adesso puoi inserire `input_ids` e language embedding nel modello: ```py >>> outputs = model(input_ids, langs=langs) ``` Lo script [run_generation.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation/run_generation.py) può generare testo tramite i language embeddings usando i checkpoints `xlm-clm`. ### XLM senza language embeddings I seguenti modelli XLM non richiedono l'utilizzo dei language embeddings per fare inferenza: - `FacebookAI/xlm-mlm-17-1280` (Modellazione mascherata del linguaggio, 17 lingue) - `FacebookAI/xlm-mlm-100-1280` (Modellazione mascherata del linguaggio, 100 lingue) Questi modelli sono utilizzati per rappresentazioni generiche di frasi, a differenza dei precedenti checkpoints XML. ## BERT Il seguente modello BERT può essere usato per compiti multilingue: - `google-bert/bert-base-multilingual-uncased` (Modellazione mascherata del linguaggio + Previsione della prossima frase, 102 lingue) - `google-bert/bert-base-multilingual-cased` (Modellazione mascherata del linguaggio + Previsione della prossima frase, 104 lingue) Questi modelli non richiedono language embeddings per fare inferenza. Riescono ad identificare il linguaggio dal contesto e inferire di conseguenza. ## XLM-RoBERTa Il seguente modello XLM-RoBERTa può essere usato per compiti multilingue: - `FacebookAI/xlm-roberta-base` (Modellazione mascherata del linguaggio, 100 lingue) - `FacebookAI/xlm-roberta-large` (Modellazione mascherata del linguaggio, 100 lingue) XLM-RoBERTa è stato addestrato su 2.5TB di dati CommonCrawl appena creati e puliti in 100 lingue. 
Offre notevoli vantaggi rispetto ai modelli multilingue rilasciati in precedenza, come mBERT o XLM, in compiti come la classificazione, l'etichettatura delle sequenze e la risposta alle domande. ## M2M100 Il seguente modello M2M100 può essere usato per compiti multilingue: - `facebook/m2m100_418M` (Traduzione) - `facebook/m2m100_1.2B` (Traduzione) In questo esempio, carica il checkpoint `facebook/m2m100_418M` per tradurre dal cinese all'inglese. Puoi impostare la lingua di partenza nel tokenizer: ```py >>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> chinese_text = "不要插手巫師的事務, 因為他們是微妙的, 很快就會發怒." >>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh") >>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M") ``` Applica il tokenizer al testo: ```py >>> encoded_zh = tokenizer(chinese_text, return_tensors="pt") ``` M2M100 forza l'id della lingua obiettivo come primo token generato per tradurre nella lingua obiettivo. Imposta il parametro `forced_bos_token_id` a `en` nel metodo `generate` per tradurre in inglese: ```py >>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) 'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.' 
``` ## MBart Il seguente modello MBart può essere usato per compiti multilingue: - `facebook/mbart-large-50-one-to-many-mmt` (Traduzione automatica multilingue uno-a-molti, 50 lingue) - `facebook/mbart-large-50-many-to-many-mmt` (Traduzione automatica multilingue molti-a-molti, 50 lingue) - `facebook/mbart-large-50-many-to-one-mmt` (Traduzione automatica multilingue molti-a-uno, 50 lingue) - `facebook/mbart-large-50` (Traduzione multilingue, 50 lingue) - `facebook/mbart-large-cc25` In questo esempio, carica il checkpoint `facebook/mbart-large-50-many-to-many-mmt` per tradurre dal finlandese all'inglese. Puoi impostare la lingua di partenza nel tokenizer: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger." >>> fi_text = "Älä sekaannu velhojen asioihin, sillä ne ovat hienovaraisia ja nopeasti vihaisia." >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI") >>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") ``` Applica il tokenizer sul testo: ```py >>> encoded_fi = tokenizer(fi_text, return_tensors="pt") ``` MBart forza l'id della lingua obiettivo come primo token generato per tradurre nella lingua obiettivo. Imposta il parametro `forced_bos_token_id` a `en` nel metodo `generate` per tradurre in inglese: ```py >>> generated_tokens = model.generate(**encoded_fi, forced_bos_token_id=tokenizer.lang_code_to_id("en_XX")) >>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) "Don't interfere with the wizard's affairs, because they are subtle, will soon get angry." ``` Se stai usando il checkpoint `facebook/mbart-large-50-many-to-one-mmt`, non hai bisogno di forzare l'id della lingua obiettivo come primo token generato altrimenti l'uso è lo stesso.
transformers/docs/source/it/multilingual.md/0
{ "file_path": "transformers/docs/source/it/multilingual.md", "repo_id": "transformers", "token_count": 3202 }
310
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Glossary この用語集は、一般的な機械学習と 🤗 トランスフォーマーの用語を定義し、ドキュメンテーションをより理解するのに役立ちます。 ## A ### attention mask アテンション マスクは、シーケンスをバッチ処理する際に使用されるオプションの引数です。 <Youtube id="M6adb1j2jPI"/> この引数は、モデルにどのトークンを注視すべきか、どのトークンを注視しないかを示します。 例えば、次の2つのシーケンスを考えてみてください: ```python >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> sequence_a = "This is a short sequence." >>> sequence_b = "This is a rather long sequence. It is at least longer than the sequence A." 
>>> encoded_sequence_a = tokenizer(sequence_a)["input_ids"] >>> encoded_sequence_b = tokenizer(sequence_b)["input_ids"] ``` The encoded versions have different lengths: ```python >>> len(encoded_sequence_a), len(encoded_sequence_b) (8, 19) ``` したがって、これらのシーケンスをそのまま同じテンソルに配置することはできません。最初のシーケンスは、 2番目のシーケンスの長さに合わせてパディングする必要があります。または、2番目のシーケンスは、最初のシーケンスの 長さに切り詰める必要があります。 最初の場合、IDのリストはパディングインデックスで拡張されます。トークナイザにリストを渡し、次のようにパディングするように 依頼できます: ```python >>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True) ``` 0sが追加されて、最初の文が2番目の文と同じ長さになるのがわかります: ```python >>> padded_sequences["input_ids"] [[101, 1188, 1110, 170, 1603, 4954, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 1188, 1110, 170, 1897, 1263, 4954, 119, 1135, 1110, 1120, 1655, 2039, 1190, 1103, 4954, 138, 119, 102]] ``` これは、PyTorchまたはTensorFlowでテンソルに変換できます。注意マスクは、モデルがそれらに注意を払わないように、埋め込まれたインデックスの位置を示すバイナリテンソルです。[`BertTokenizer`]では、`1`は注意を払う必要がある値を示し、`0`は埋め込まれた値を示します。この注意マスクは、トークナイザが返す辞書のキー「attention_mask」の下にあります。 ```python >>> padded_sequences["attention_mask"] [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ``` ### autoencoding models [エンコーダーモデル](#encoder-models) および [マスク言語モデリング](#masked-language-modeling-mlm) を参照してください。 ### autoregressive models [因果言語モデリング](#causal-language-modeling) および [デコーダーモデル](#decoder-models) を参照してください。 ## B ### backbone バックボーンは、生の隠れた状態や特徴を出力するネットワーク(埋め込みと層)です。通常、特徴を入力として受け取るために [ヘッド](#head) に接続されており、予測を行います。たとえば、[`ViTModel`] は特定のヘッドが上にないバックボーンです。他のモデルも [`VitModel`] をバックボーンとして使用できます、例えば [DPT](model_doc/dpt) です。 ## C ### causal language modeling モデルがテキストを順番に読み、次の単語を予測する事前トレーニングタスクです。通常、モデルは文全体を読み取りますが、特定のタイムステップで未来のトークンを隠すためにモデル内でマスクを使用します。 ### channel カラー画像は、赤、緑、青(RGB)の3つのチャネルの値の組み合わせから成り立っており、グレースケール画像は1つのチャネルしか持ちません。🤗 Transformers では、チャネルは画像のテンソルの最初または最後の次元になることがあります:[`n_channels`, `height`, `width`] または [`height`, `width`, `n_channels`]。 ### connectionist temporal classification (CTC) 
入力と出力が正確にどのように整列するかを正確に知らなくてもモデルを学習させるアルゴリズム。CTC は、特定の入力に対してすべての可能な出力の分布を計算し、その中から最も可能性の高い出力を選択します。CTC は、スピーカーの異なる発話速度など、さまざまな理由で音声がトランスクリプトと完全に整合しない場合に、音声認識タスクで一般的に使用されます。 ### convolution ニューラルネットワークの一種で、入力行列が要素ごとに小さな行列(カーネルまたはフィルター)と乗算され、値が新しい行列に合計されるレイヤーのタイプ。これは入力行列全体に対して繰り返される畳み込み操作として知られ、各操作は入力行列の異なるセグメントに適用されます。畳み込みニューラルネットワーク(CNN)は、コンピュータビジョンで一般的に使用されています。 ## D ### decoder input IDs この入力はエンコーダーデコーダーモデルに特有であり、デコーダーに供給される入力IDを含みます。これらの入力は、翻訳や要約などのシーケンスツーシーケンスタスクに使用され、通常、各モデルに固有の方法で構築されます。 ほとんどのエンコーダーデコーダーモデル(BART、T5)は、`labels` から独自に `decoder_input_ids` を作成します。このようなモデルでは、`labels` を渡すことがトレーニングを処理する優れた方法です。 シーケンスツーシーケンストレーニングにおけるこれらの入力IDの処理方法を確認するために、各モデルのドキュメントを確認してください。 ### decoder models オートリグレッションモデルとも呼ばれ、モデルがテキストを順番に読み、次の単語を予測する事前トレーニングタスク(因果言語モデリング)に関与します。通常、モデルは文全体を読み取り、特定のタイムステップで未来のトークンを隠すマスクを使用して行われます。 <Youtube id="d_ixlCubqQw"/> ### deep learning (DL) ニューラルネットワークを使用する機械学習アルゴリズムで、複数の層を持っています。 ## E ### encoder models オートエンコーディングモデルとしても知られており、エンコーダーモデルは入力(テキストや画像など)を、埋め込みと呼ばれる簡略化された数値表現に変換します。エンコーダーモデルは、しばしば[マスクされた言語モデリング(#masked-language-modeling-mlm)](#masked-language-modeling-mlm)などの技術を使用して事前にトレーニングされ、入力シーケンスの一部をマスクし、モデルにより意味のある表現を作成することが強制されます。 <Youtube id="H39Z_720T5s"/> ## F ### feature extraction 生データをより情報豊かで機械学習アルゴリズムにとって有用な特徴のセットに選択および変換するプロセス。特徴抽出の例には、生のテキストを単語埋め込みに変換したり、画像/ビデオデータからエッジや形状などの重要な特徴を抽出したりすることが含まれます。 ### feed forward chunking トランスフォーマー内の各残差注意ブロックでは、通常、自己注意層の後に2つのフィードフォワード層が続きます。 フィードフォワード層の中間埋め込みサイズは、モデルの隠れたサイズよりも大きいことがよくあります(たとえば、`google-bert/bert-base-uncased`の場合)。 入力サイズが `[batch_size、sequence_length]` の場合、中間フィードフォワード埋め込み `[batch_size、sequence_length、config.intermediate_size]` を保存するために必要なメモリは、メモリの大部分を占めることがあります。[Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451)の著者は、計算が `sequence_length` 次元に依存しないため、両方のフィードフォワード層の出力埋め込み `[batch_size、config.hidden_size]_0、...、[batch_size、config.hidden_size]_n` を個別に計算し、後で `[batch_size、sequence_length、config.hidden_size]` 
に連結することは数学的に等価であると気付きました。これにより、増加した計算時間とメモリ使用量のトレードオフが生じますが、数学的に等価な結果が得られます。 [`apply_chunking_to_forward`] 関数を使用するモデルの場合、`chunk_size` は並列に計算される出力埋め込みの数を定義し、メモリと時間の複雑さのトレードオフを定義します。`chunk_size` が 0 に設定されている場合、フィードフォワードのチャンキングは行われません。 ### finetuned models ファインチューニングは、事前にトレーニングされたモデルを取り、その重みを固定し、新しく追加された[model head](#head)で出力レイヤーを置き換える形式の転移学習です。モデルヘッドは対象のデータセットでトレーニングされます。 詳細については、[Fine-tune a pretrained model](https://huggingface.co/docs/transformers/training) チュートリアルを参照して、🤗 Transformersを使用したモデルのファインチューニング方法を学びましょう。 ## H ### head モデルヘッドは、ニューラルネットワークの最後のレイヤーを指し、生の隠れた状態を受け入れて異なる次元に射影します。各タスクに対して異なるモデルヘッドがあります。例えば: * [`GPT2ForSequenceClassification`] は、ベースの[`GPT2Model`]の上にあるシーケンス分類ヘッド(線形層)です。 * [`ViTForImageClassification`] は、ベースの[`ViTModel`]の`CLS`トークンの最終隠れた状態の上にある画像分類ヘッド(線形層)です。 * [`Wav2Vec2ForCTC`] は、[CTC](#connectionist-temporal-classification-ctc)を持つベースの[`Wav2Vec2Model`]の言語モデリングヘッドです。 ## I ### image patch ビジョンベースのトランスフォーマーモデルは、画像をより小さなパッチに分割し、それらを線形に埋め込み、モデルにシーケンスとして渡します。モデルの ### inference 推論は、トレーニングが完了した後に新しいデータでモデルを評価するプロセスです。 🤗 Transformers を使用して推論を実行する方法については、[推論のパイプライン](https://huggingface.co/docs/transformers/pipeline_tutorial) チュートリアルを参照してください。 ### input IDs 入力IDは、モデルへの入力として渡す必要があるパラメーターの中で最も一般的なものです。これらはトークンのインデックスであり、モデルによって入力として使用されるシーケンスを構築するトークンの数値表現です。 <Youtube id="VFp38yj8h3A"/> 各トークナイザーは異なる方法で動作しますが、基本的なメカニズムは同じです。以下はBERTトークナイザーを使用した例です。BERTトークナイザーは[WordPiece](https://arxiv.org/pdf/1609.08144.pdf)トークナイザーです。 ```python >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> sequence = "A Titan RTX has 24GB of VRAM" ``` トークナイザーは、シーケンスをトークナイザー語彙で使用可能なトークンに分割します。 ```python >>> tokenized_sequence = tokenizer.tokenize(sequence) ``` トークンは単語またはサブワードです。 たとえば、ここでは "VRAM" はモデルの語彙に含まれていなかったため、"V"、"RA"、"M" に分割されました。 これらのトークンが別々の単語ではなく、同じ単語の一部であることを示すために、"RA" と "M" にはダブルハッシュのプレフィックスが追加されます。 ```python >>> print(tokenized_sequence) ['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', 
'##RA', '##M'] ``` これらのトークンは、モデルが理解できるようにIDに変換できます。これは、文をトークナイザーに直接供給して行うことができます。トークナイザーは、パフォーマンスの向上のために[🤗 Tokenizers](https://github.com/huggingface/tokenizers)のRust実装を活用しています。 ```python >>> inputs = tokenizer(sequence) ``` トークナイザーは、対応するモデルが正しく動作するために必要なすべての引数を含む辞書を返します。トークンのインデックスは、キー `input_ids` の下にあります。 ```python >>> encoded_sequence = inputs["input_ids"] >>> print(encoded_sequence) [101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102] ``` 注意:トークナイザは、関連するモデルがそれらを必要とする場合に自動的に「特別なトークン」を追加します。これらは、モデルが時折使用する特別なIDです。 前のIDシーケンスをデコードする場合、 ```python >>> decoded_sequence = tokenizer.decode(encoded_sequence) ``` 私たちは見ます ```python >>> print(decoded_sequence) [CLS] A Titan RTX has 24GB of VRAM [SEP] ``` これは[`BertModel`]がその入力を期待する方法です。 ## L ### Labels ラベルは、モデルが損失を計算するために渡すことができるオプションの引数です。これらのラベルは、モデルの予測の期待値であるべきです。モデルは、通常の損失を使用して、その予測と期待値(ラベル)との間の損失を計算します。 これらのラベルはモデルのヘッドに応じて異なります。たとえば: - シーケンス分類モデル([`BertForSequenceClassification`])の場合、モデルは次元が `(batch_size)` のテンソルを期待し、バッチ内の各値がシーケンス全体の予測ラベルに対応します。 - トークン分類モデル([`BertForTokenClassification`])の場合、モデルは次元が `(batch_size, seq_length)` のテンソルを期待し、各値が各個々のトークンの予測ラベルに対応します。 - マスク言語モデリングの場合([`BertForMaskedLM`])、モデルは次元が `(batch_size, seq_length)` のテンソルを期待し、各値が各個々のトークンの予測ラベルに対応します。ここでのラベルはマスクされたトークンのトークンIDであり、他のトークンには通常 -100 などの値が設定されます。 - シーケンス間のタスクの場合([`BartForConditionalGeneration`]、[`MBartForConditionalGeneration`])、モデルは次元が `(batch_size, tgt_seq_length)` のテンソルを期待し、各値が各入力シーケンスに関連付けられたターゲットシーケンスに対応します。トレーニング中、BARTとT5の両方は適切な `decoder_input_ids` とデコーダーのアテンションマスクを内部で生成します。通常、これらを提供する必要はありません。これはエンコーダーデコーダーフレームワークを利用するモデルには適用されません。 - 画像分類モデルの場合([`ViTForImageClassification`])、モデルは次元が `(batch_size)` のテンソルを期待し、バッチ内の各値が各個々の画像の予測ラベルに対応します。 - セマンティックセグメンテーションモデルの場合([`SegformerForSemanticSegmentation`])、モデルは次元が `(batch_size, height, width)` のテンソルを期待し、バッチ内の各値が各個々のピクセルの予測ラベルに対応します。 - 物体検出モデルの場合([`DetrForObjectDetection`])、モデルは各個々の画像の予測ラベルと境界ボックスの数に対応する `class_labels` と `boxes` キーを持つ辞書のリストを期待します。 - 
自動音声認識モデルの場合([`Wav2Vec2ForCTC`])、モデルは次元が `(batch_size, target_length)` のテンソルを期待し、各値が各個々のトークンの予測ラベルに対応します。 <Tip> 各モデルのラベルは異なる場合があるため、常に各モデルのドキュメントを確認して、それらの特定のラベルに関する詳細情報を確認してください! </Tip> ベースモデル([`BertModel`])はラベルを受け入れません。これらはベースのトランスフォーマーモデルであり、単に特徴を出力します。 ### large language models (LLM) 大量のデータでトレーニングされた変換器言語モデル(GPT-3、BLOOM、OPT)を指す一般的な用語です。これらのモデルは通常、多くの学習可能なパラメータを持っています(たとえば、GPT-3の場合、1750億個)。 ## M ### masked language modeling (MLM) モデルはテキストの破損バージョンを見る事前トレーニングタスクで、通常はランダムに一部のトークンをマスキングして元のテキストを予測する必要があります。 ### multimodal テキストと別の種類の入力(たとえば画像)を組み合わせるタスクです。 ## N ### Natural language generation (NLG) テキストを生成する関連するすべてのタスク(たとえば、[Transformersで書く](https://transformer.huggingface.co/)、翻訳など)。 ### Natural language processing (NLP) テキストを扱う方法を一般的に表現したものです。 ### Natural language understanding (NLU) テキスト内に何があるかを理解する関連するすべてのタスク(たとえば、テキスト全体の分類、個々の単語の分類など)。 ## P ### pipeline 🤗 Transformersのパイプラインは、データの前処理と変換を特定の順序で実行してデータを処理し、モデルから予測を返す一連のステップを指す抽象化です。パイプラインに見られるいくつかのステージの例には、データの前処理、特徴抽出、正規化などがあります。 詳細については、[推論のためのパイプライン](https://huggingface.co/docs/transformers/pipeline_tutorial)を参照してください。 ### pixel values モデルに渡される画像の数値表現のテンソルです。ピクセル値は、形状が [`バッチサイズ`, `チャネル数`, `高さ`, `幅`] の行列で、画像プロセッサから生成されます。 ### pooling 行列を小さな行列に縮小する操作で、プール対象の次元の最大値または平均値を取ることが一般的です。プーリングレイヤーは一般的に畳み込みレイヤーの間に見られ、特徴表現をダウンサンプリングします。 ### position IDs トークンごとの位置が埋め込まれているRNNとは異なり、トランスフォーマーは各トークンの位置を把握していません。したがって、モデルはトークンの位置を識別するために位置ID(`position_ids`)を使用します。 これはオプションのパラメータです。モデルに `position_ids` が渡されない場合、IDは自動的に絶対的な位置埋め込みとして作成されます。 絶対的な位置埋め込みは範囲 `[0、config.max_position_embeddings - 1]` から選択されます。一部のモデルは、正弦波位置埋め込みや相対位置埋め込みなど、他のタイプの位置埋め込みを使用することがあります。 ### preprocessing 生データを機械学習モデルで簡単に処理できる形式に準備するタスクです。例えば、テキストは通常、トークン化によって前処理されます。他の入力タイプに対する前処理の具体的な方法を知りたい場合は、[Preprocess](https://huggingface.co/docs/transformers/preprocessing) チュートリアルをご覧ください。 ### pretrained model 
あるデータ(たとえば、Wikipedia全体など)で事前に学習されたモデルです。事前学習の方法には、自己教師ありの目的が含まれ、テキストを読み取り、次の単語を予測しようとするもの([因果言語モデリング](#causal-language-modeling)を参照)や、一部の単語をマスクし、それらを予測しようとするもの([マスク言語モデリング](#masked-language-modeling-mlm)を参照)があります。 音声とビジョンモデルには独自の事前学習の目的があります。たとえば、Wav2Vec2は音声モデルで、モデルに対して「真の」音声表現を偽の音声表現のセットから識別する必要がある対比的なタスクで事前学習されています。一方、BEiTはビジョンモデルで、一部の画像パッチをマスクし、モデルにマスクされたパッチを予測させるタスク(マスク言語モデリングの目的と似ています)で事前学習されています。 ## R ### recurrent neural network (RNN) テキストを処理するために層をループさせるモデルの一種です。 ### representation learning 生データの意味のある表現を学習する機械学習のサブフィールドです。表現学習の技術の一部には単語埋め込み、オートエンコーダー、Generative Adversarial Networks(GANs)などがあります。 ## S ### sampling rate 秒ごとに取られるサンプル(オーディオ信号など)の数をヘルツ単位で測定したものです。サンプリングレートは音声などの連続信号を離散化する結果です。 ### self-attention 入力の各要素は、どの他の要素に注意を払うべきかを検出します。 ### self-supervised learning モデルがラベルのないデータから自分自身の学習目標を作成する機械学習技術のカテゴリです。これは[教師なし学習](#unsupervised-learning)や[教師あり学習](#supervised-learning)とは異なり、学習プロセスはユーザーからは明示的には監督されていない点が異なります。 自己教師あり学習の1つの例は[マスク言語モデリング](#masked-language-modeling-mlm)で、モデルには一部のトークンが削除された文が与えられ、欠落したトークンを予測するように学習します。 ### semi-supervised learning ラベル付きデータの少量とラベルのないデータの大量を組み合わせてモデルの精度を向上させる広範な機械学習トレーニング技術のカテゴリです。[教師あり学習](#supervised-learning)や[教師なし学習](#unsupervised-learning)とは異なり、半教師あり学習のアプローチの1つは「セルフトレーニング」であり、モデルはラベル付きデータでトレーニングされ、次にラベルのないデータで予測を行います。モデルが最も自信を持って予測する部分がラベル付きデータセットに追加され、モデルの再トレーニングに使用されます。 ### sequence-to-sequence (seq2seq) 入力から新しいシーケンスを生成するモデルです。翻訳モデルや要約モデル([Bart](model_doc/bart)や[T5](model_doc/t5)など)などがこれに該当します。 ### stride [畳み込み](#convolution)または[プーリング](#pooling)において、ストライドはカーネルが行列上で移動する距離を指します。ストライドが1の場合、カーネルは1ピクセルずつ移動し、ストライドが2の場合、カーネルは2ピクセルずつ移動します。 ### supervised learning モデルのトレーニング方法の一つで、直接ラベル付きデータを使用してモデルの性能を修正し指導します。データがトレーニングされているモデルに供給され、その予測が既知のラベルと比較されます。モデルは予測がどれだけ誤っていたかに基づいて重みを更新し、プロセスはモデルの性能を最適化するために繰り返されます。 ## T ### token 文の一部であり、通常は単語ですが、サブワード(一般的でない単語はしばしばサブワードに分割されることがあります)または句読点の記号であることもあります。 ### token Type IDs 一部のモデルは、文のペアの分類や質問応答を行うことを目的としています。 <Youtube id="0u3ioSwev3s"/> 
これには異なる2つのシーケンスを単一の「input_ids」エントリに結合する必要があり、通常は分類子(`[CLS]`)や区切り記号(`[SEP]`)などの特別なトークンの助けを借りて実行されます。例えば、BERTモデルは次のように2つのシーケンス入力を構築します: 日本語訳を提供していただきたいです。Markdown形式で記述してください。 ```python >>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP] ``` 我々は、前述のように、2つのシーケンスを2つの引数として `tokenizer` に渡すことで、このような文を自動的に生成することができます(以前のようにリストではなく)。以下のように: ```python >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> sequence_a = "HuggingFace is based in NYC" >>> sequence_b = "Where is HuggingFace based?" >>> encoded_dict = tokenizer(sequence_a, sequence_b) >>> decoded = tokenizer.decode(encoded_dict["input_ids"]) ``` これに対応するコードは以下です: ```python >>> print(decoded) [CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP] ``` 一部のモデルでは、1つのシーケンスがどこで終わり、別のシーケンスがどこで始まるかを理解するのに十分な情報が備わっています。ただし、BERTなどの他のモデルでは、トークンタイプID(セグメントIDとも呼ばれる)も使用されています。これは、モデル内の2つのシーケンスを識別するバイナリマスクとして表されます。 トークナイザは、このマスクを「token_type_ids」として返します。 ```python >>> encoded_dict["token_type_ids"] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` 最初のシーケンス、つまり質問のために使用される「コンテキスト」は、すべてのトークンが「0」で表されています。一方、2番目のシーケンス、質問に対応するものは、すべてのトークンが「1」で表されています。 一部のモデル、例えば [`XLNetModel`] のように、追加のトークンが「2」で表されます。 ### transfer learning 事前に学習されたモデルを取り、それをタスク固有のデータセットに適応させる技術。ゼロからモデルを訓練する代わりに、既存のモデルから得た知識を出発点として活用できます。これにより学習プロセスが加速し、必要な訓練データの量が減少します。 ### transformer 自己注意ベースの深層学習モデルアーキテクチャ。 ## U ### unsupervised learning モデルに提供されるデータがラベル付けされていないモデルトレーニングの形態。教師なし学習の技術は、タスクに役立つパターンを見つけるためにデータ分布の統計情報を活用します。
transformers/docs/source/ja/glossary.md/0
{ "file_path": "transformers/docs/source/ja/glossary.md", "repo_id": "transformers", "token_count": 12796 }
311
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Trainer [`Trainer`] クラスは、ほとんどの標準的なユースケースに対して、PyTorch で機能を完全にトレーニングするための API を提供します。これは、[サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples) のほとんどで使用されています。 [`Trainer`] をインスタンス化する前に、トレーニング中にカスタマイズのすべてのポイントにアクセスするために [`TrainingArguments`] を作成します。 この API は、複数の GPU/TPU での分散トレーニング、[NVIDIA Apex](https://github.com/NVIDIA/apex) および PyTorch のネイティブ AMP による混合精度をサポートします。 [`Trainer`] には、上記の機能をサポートする基本的なトレーニング ループが含まれています。カスタム動作を挿入するには、それらをサブクラス化し、次のメソッドをオーバーライドします。 - **get_train_dataloader** -- トレーニング データローダーを作成します。 - **get_eval_dataloader** -- 評価用データローダーを作成します。 - **get_test_dataloader** -- テスト データローダーを作成します。 - **log** -- トレーニングを監視しているさまざまなオブジェクトに関する情報をログに記録します。 - **create_optimizer_and_scheduler** -- オプティマイザと学習率スケジューラが渡されなかった場合にセットアップします。 初期化。 `create_optimizer`メソッドと`create_scheduler`メソッドをサブクラス化またはオーバーライドすることもできることに注意してください。 別々に。 - **create_optimizer** -- init で渡されなかった場合にオプティマイザーをセットアップします。 - **create_scheduler** -- init で渡されなかった場合、学習率スケジューラを設定します。 - **compute_loss** - トレーニング入力のバッチの損失を計算します。 - **training_step** -- トレーニング ステップを実行します。 - **prediction_step** -- 評価/テスト ステップを実行します。 - **evaluate** -- 評価ループを実行し、メトリクスを返します。 - **predict** -- テスト セットの予測 (ラベルが使用可能な場合はメトリクスも含む) を返します。 <Tip warning={true}> [`Trainer`] クラスは 🤗 
Transformers モデル用に最適化されており、驚くべき動作をする可能性があります 他の機種で使用する場合。独自のモデルで使用する場合は、次の点を確認してください。 - モデルは常に [`~utils.ModelOutput`] のタプルまたはサブクラスを返します。 - `labels` 引数が指定され、その損失が最初の値として返される場合、モデルは損失を計算できます。 タプルの要素 (モデルがタプルを返す場合) - モデルは複数のラベル引数を受け入れることができます ([`TrainingArguments`] で `label_names` を使用して、その名前を [`Trainer`] に示します) が、それらのいずれにも `"label"` という名前を付ける必要はありません。 </Tip> 以下は、加重損失を使用するように [`Trainer`] をカスタマイズする方法の例です (不均衡なトレーニング セットがある場合に役立ちます)。 ```python from torch import nn from transformers import Trainer class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): labels = inputs.pop("labels") # forward pass outputs = model(**inputs) logits = outputs.get("logits") # compute custom loss (suppose one has 3 labels with different weights) loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0], device=model.device)) loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1)) return (loss, outputs) if return_outputs else loss ``` PyTorch [`Trainer`] のトレーニング ループの動作をカスタマイズするもう 1 つの方法は、トレーニング ループの状態を検査できる [callbacks](コールバック) を使用することです (進行状況レポート、TensorBoard または他の ML プラットフォームでのログ記録など)。決定(早期停止など)。 ## Trainer [[autodoc]] Trainer - all ## Seq2SeqTrainer [[autodoc]] Seq2SeqTrainer - evaluate - predict ## TrainingArguments [[autodoc]] TrainingArguments - all ## Seq2SeqTrainingArguments [[autodoc]] Seq2SeqTrainingArguments - all ## Checkpoints デフォルトでは、[`Trainer`] はすべてのチェックポイントを、 [`TrainingArguments`] を使用しています。これらは、xxx を含む`checkpoint-xxx`という名前のサブフォルダーに保存されます。 それはトレーニングの段階でした。 チェックポイントからトレーニングを再開するには、次のいずれかを使用して [`Trainer.train`] を呼び出します。 - `resume_from_checkpoint=True` は最新のチェックポイントからトレーニングを再開します - `resume_from_checkpoint=checkpoint_dir` ディレクトリ内の特定のチェックポイントからトレーニングを再開します 合格した。 さらに、`push_to_hub=True` を使用すると、モデル ハブにチェックポイントを簡単に保存できます。デフォルトでは、すべて 中間チェックポイントに保存されたモデルは別のコミットに保存されますが、オプティマイザーの状態は保存されません。適応できます [`TrainingArguments`] の `hub-strategy` 値を次のいずれかにします。 - `"checkpoint"`: 最新のチェックポイントも last-checkpoint という名前のサブフォルダーにプッシュされます。 
`trainer.train(resume_from_checkpoint="output_dir/last-checkpoint")` を使用してトレーニングを簡単に再開します。 - `"all_checkpoints"`: すべてのチェックポイントは、出力フォルダーに表示されるようにプッシュされます (したがって、1 つのチェックポイントが得られます) 最終リポジトリ内のフォルダーごとのチェックポイント フォルダー) ## Logging デフォルトでは、[`Trainer`] はメインプロセスに `logging.INFO` を使用し、レプリカがある場合には `logging.WARNING` を使用します。 これらのデフォルトは、[`TrainingArguments`] の 5 つの `logging` レベルのいずれかを使用するようにオーバーライドできます。 引数: - `log_level` - メインプロセス用 - `log_level_replica` - レプリカ用 さらに、[`TrainingArguments`] の `log_on_each_node` が `False` に設定されている場合、メイン ノードのみが メイン プロセスのログ レベル設定を使用すると、他のすべてのノードはレプリカのログ レベル設定を使用します。 [`Trainer`] は、`transformers` のログ レベルをノードごとに個別に設定することに注意してください。 [`Trainer.__init__`]。したがって、他の機能を利用する場合は、これをより早く設定することをお勧めします (次の例を参照)。 [`Trainer`] オブジェクトを作成する前の `transformers` 機能。 これをアプリケーションで使用する方法の例を次に示します。 ```python [...] logger = logging.getLogger(__name__) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) # set the main code and the modules it uses to the same log-level according to the node log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...) ``` そして、メイン ノードと他のすべてのノードで重複する可能性が高いものを出力しないように警告するだけを表示したい場合は、 警告: 次のように実行できます。 ```bash my_app.py ... --log_level warning --log_level_replica error ``` マルチノード環境で、各ノードのメインプロセスのログを繰り返したくない場合は、次のようにします。 上記を次のように変更します。 ```bash my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0 ``` その後、最初のノードのメイン プロセスのみが「警告」レベルでログに記録され、メイン ノード上の他のすべてのプロセスはログに記録されます。 ノードと他のノード上のすべてのプロセスは「エラー」レベルでログに記録されます。 アプリケーションをできるだけ静かにする必要がある場合は、次のようにします。 ```bash my_app.py ... 
--log_level error --log_level_replica error --log_on_each_node 0 ``` (マルチノード環境の場合は `--log_on_each_node 0` を追加します) ## Randomness [`Trainer`] によって生成されたチェックポイントから再開する場合、すべての努力がその状態を復元するために行われます。 _python_、_numpy_、および _pytorch_ の RNG 状態は、そのチェックポイントを保存した時点と同じ状態になります。 これにより、「停止して再開」というスタイルのトレーニングが、ノンストップトレーニングに可能な限り近づけられるはずです。 ただし、さまざまなデフォルトの非決定的な pytorch 設定により、これは完全に機能しない可能性があります。フルをご希望の場合は 決定論については、[ランダム性のソースの制御](https://pytorch.org/docs/stable/notes/randomness) を参照してください。ドキュメントで説明されているように、これらの設定の一部は 物事を決定論的にするもの (例: `torch.backends.cudnn.deterministic`) は物事を遅くする可能性があるため、これは デフォルトでは実行できませんが、必要に応じて自分で有効にすることができます。 ## Specific GPUs Selection どの GPU をどのような順序で使用するかをプログラムに指示する方法について説明します。 [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.Parallel.DistributedDataParallel.html) を使用して GPU のサブセットのみを使用する場合、使用する GPU の数を指定するだけです。 。たとえば、GPU が 4 つあるが、最初の 2 つを使用したい場合は、次のようにします。 ```bash torchrun --nproc_per_node=2 trainer-program.py ... ``` [`accelerate`](https://github.com/huggingface/accelerate) または [`deepspeed`](https://github.com/microsoft/DeepSpeed) がインストールされている場合は、次を使用して同じことを達成することもできます。の一つ: ```bash accelerate launch --num_processes 2 trainer-program.py ... ``` ```bash deepspeed --num_gpus 2 trainer-program.py ... ``` これらのランチャーを使用するために、Accelerate または [Deepspeed 統合](deepspeed) 機能を使用する必要はありません。 これまでは、プログラムに使用する GPU の数を指示できました。次に、特定の GPU を選択し、その順序を制御する方法について説明します。 次の環境変数は、使用する GPU とその順序を制御するのに役立ちます。 **`CUDA_VISIBLE_DEVICES`** 複数の GPU があり、そのうちの 1 つまたはいくつかの GPU だけを使用したい場合は、環境変数 `CUDA_VISIBLE_DEVICES` を使用する GPU のリストに設定します。 たとえば、4 つの GPU (0、1、2、3) があるとします。物理 GPU 0 と 2 のみで実行するには、次のようにします。 ```bash CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ... ``` したがって、pytorch は 2 つの GPU のみを認識し、物理 GPU 0 と 2 はそれぞれ `cuda:0` と `cuda:1` にマッピングされます。 順序を変更することもできます。 ```bash CUDA_VISIBLE_DEVICES=2,0 torchrun trainer-program.py ... 
``` ここでは、物理 GPU 0 と 2 がそれぞれ`cuda:1`と`cuda:0`にマッピングされています。 上記の例はすべて `DistributedDataParallel` 使用パターンのものですが、同じ方法が [`DataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html) でも機能します。 ```bash CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ... ``` GPU のない環境をエミュレートするには、次のようにこの環境変数を空の値に設定するだけです。 ```bash CUDA_VISIBLE_DEVICES= python trainer-program.py ... ``` 他の環境変数と同様に、これらをコマンド ラインに追加する代わりに、次のようにエクスポートすることもできます。 ```bash export CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ... ``` ただし、この方法では、以前に環境変数を設定したことを忘れて、なぜ間違った GPU が使用されているのか理解できない可能性があるため、混乱を招く可能性があります。したがって、このセクションのほとんどの例で示されているように、同じコマンド ラインで特定の実行に対してのみ環境変数を設定するのが一般的です。 **`CUDA_DEVICE_ORDER`** 物理デバイスの順序を制御する追加の環境変数 `CUDA_DEVICE_ORDER` があります。選択肢は次の 2 つです。 1. PCIe バス ID 順 (`nvidia-smi` の順序と一致) - これがデフォルトです。 ```bash export CUDA_DEVICE_ORDER=PCI_BUS_ID ``` 2. GPU コンピューティング能力順に並べる ```bash export CUDA_DEVICE_ORDER=FASTEST_FIRST ``` ほとんどの場合、この環境変数を気にする必要はありませんが、古い GPU と新しい GPU が物理的に挿入されているため、遅い古いカードが遅くなっているように見えるような偏ったセットアップを行っている場合には、非常に役立ちます。初め。これを解決する 1 つの方法は、カードを交換することです。ただし、カードを交換できない場合 (デバイスの冷却が影響を受けた場合など)、`CUDA_DEVICE_ORDER=FASTEST_FIRST`を設定すると、常に新しい高速カードが最初に配置されます。ただし、`nvidia-smi`は依然として PCIe の順序でレポートするため、多少混乱するでしょう。 順序を入れ替えるもう 1 つの解決策は、以下を使用することです。 ```bash export CUDA_VISIBLE_DEVICES=1,0 ``` この例では 2 つの GPU だけを使用していますが、もちろん、コンピューターに搭載されている数の GPU にも同じことが当てはまります。 また、この環境変数を設定する場合は、`~/.bashrc` ファイルまたはその他の起動設定ファイルに設定して、忘れるのが最善です。 ## Trainer Integrations [`Trainer`] は、トレーニングを劇的に改善する可能性のあるライブラリをサポートするように拡張されました。 時間とはるかに大きなモデルに適合します。 現在、サードパーティのソリューション [DeepSpeed](https://github.com/microsoft/DeepSpeed) および [PyTorch FSDP](https://pytorch.org/docs/stable/fsdp.html) をサポートしています。論文 [ZeRO: メモリの最適化兆パラメータ モデルのトレーニングに向けて、Samyam Rajbhandari、Jeff Rasley、Olatunji Ruwase、Yuxiong He 著](https://arxiv.org/abs/1910.02054)。 この提供されるサポートは、この記事の執筆時点では新しくて実験的なものです。 DeepSpeed と PyTorch FSDP のサポートはアクティブであり、それに関する問題は歓迎しますが、FairScale 統合は PyTorch メインに統合されているため、もうサポートしていません ([PyTorch FSDP 
統合](#pytorch-fully-sharded-data-parallel)) <a id='zero-install-notes'></a> ### CUDA Extension Installation Notes この記事の執筆時点では、Deepspeed を使用するには、CUDA C++ コードをコンパイルする必要があります。 すべてのインストールの問題は、[Deepspeed](https://github.com/microsoft/DeepSpeed/issues) の対応する GitHub の問題を通じて対処する必要がありますが、ビルド中に発生する可能性のある一般的な問題がいくつかあります。 CUDA 拡張機能を構築する必要がある PyTorch 拡張機能。 したがって、次の操作を実行中に CUDA 関連のビルドの問題が発生した場合は、次のとおりです。 ```bash pip install deepspeed ``` まず次の注意事項をお読みください。 これらのノートでは、`pytorch` が CUDA `10.2` でビルドされた場合に何をすべきかの例を示します。あなたの状況が次のような場合 異なる場合は、バージョン番号を目的のバージョンに調整することを忘れないでください。 #### Possible problem #1 Pytorch には独自の CUDA ツールキットが付属していますが、これら 2 つのプロジェクトをビルドするには、同一バージョンの CUDA が必要です。 システム全体にインストールされます。 たとえば、Python 環境に `cudatoolkit==10.2` を指定して `pytorch` をインストールした場合は、次のものも必要です。 CUDA `10.2` がシステム全体にインストールされました。 正確な場所はシステムによって異なる場合がありますが、多くのシステムでは`/usr/local/cuda-10.2`が最も一般的な場所です。 Unix システム。 CUDA が正しく設定され、`PATH`環境変数に追加されると、 次のようにしてインストール場所を指定します。 ```bash which nvcc ``` CUDA がシステム全体にインストールされていない場合は、最初にインストールしてください。お気に入りを使用して手順を見つけることができます 検索エンジン。たとえば、Ubuntu を使用している場合は、[ubuntu cuda 10.2 install](https://www.google.com/search?q=ubuntu+cuda+10.2+install) を検索するとよいでしょう。 #### Possible problem #2 もう 1 つの考えられる一般的な問題は、システム全体に複数の CUDA ツールキットがインストールされている可能性があることです。たとえばあなた がある可能性があり: ```bash /usr/local/cuda-10.2 /usr/local/cuda-11.0 ``` この状況では、`PATH` および `LD_LIBRARY_PATH` 環境変数に以下が含まれていることを確認する必要があります。 目的の CUDA バージョンへの正しいパス。通常、パッケージ インストーラーは、これらに、 最後のバージョンがインストールされました。適切なパッケージが見つからないためにパッケージのビルドが失敗するという問題が発生した場合は、 CUDA バージョンがシステム全体にインストールされているにもかかわらず、前述の 2 つを調整する必要があることを意味します 環境変数。 まず、その内容を見てみましょう。 ```bash echo $PATH echo $LD_LIBRARY_PATH ``` それで、中に何が入っているかがわかります。 `LD_LIBRARY_PATH` が空である可能性があります。 `PATH` は実行可能ファイルが存在する場所をリストし、`LD_LIBRARY_PATH` は共有ライブラリの場所を示します。 探すことです。どちらの場合も、前のエントリが後のエントリより優先されます。 `:` は複数を区切るために使用されます エントリ。 ここで、ビルド プログラムに特定の CUDA ツールキットの場所を指示するには、最初にリストされる希望のパスを挿入します。 やっていること: ```bash export PATH=/usr/local/cuda-10.2/bin:$PATH export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:$LD_LIBRARY_PATH 
``` 既存の値を上書きするのではなく、先頭に追加することに注意してください。 もちろん、必要に応じてバージョン番号やフルパスを調整します。割り当てたディレクトリが実際に機能することを確認してください 存在する。 `lib64` サブディレクトリは、`libcudart.so` などのさまざまな CUDA `.so` オブジェクトが存在する場所です。 システムでは別の名前が付けられますが、現実を反映するように調整してください。 #### Possible problem #3 一部の古い CUDA バージョンは、新しいコンパイラでのビルドを拒否する場合があります。たとえば、あなたは`gcc-9`を持っていますが、それが必要です `gcc-7`。 それにはさまざまな方法があります。 最新の CUDA ツールキットをインストールできる場合は、通常、新しいコンパイラがサポートされているはずです。 あるいは、既に所有しているコンパイラに加えて、下位バージョンのコンパイラをインストールすることもできます。 すでに存在しますが、デフォルトではないため、ビルドシステムはそれを認識できません。 「gcc-7」がインストールされているが、 ビルドシステムが見つからないというメッセージを表示する場合は、次の方法で解決できる可能性があります。 ```bash sudo ln -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc sudo ln -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++ ``` ここでは、`/usr/local/cuda-10.2/bin/gcc` から `gcc-7` へのシンボリックリンクを作成しています。 `/usr/local/cuda-10.2/bin/` は `PATH` 環境変数内にある必要があります (前の問題の解決策を参照)。 `gcc-7` (および `g++7`) が見つかるはずで、ビルドは成功します。 いつものように、状況に合わせて例のパスを編集してください。 ### PyTorch Fully Sharded Data parallel より大きなバッチ サイズで巨大なモデルのトレーニングを高速化するには、完全にシャード化されたデータ並列モデルを使用できます。 このタイプのデータ並列パラダイムでは、オプティマイザーの状態、勾配、パラメーターをシャーディングすることで、より多くのデータと大規模なモデルをフィッティングできます。 この機能とその利点の詳細については、[完全シャーディング データ並列ブログ](https://pytorch.org/blog/introducing-pytorch-full-sharded-data-Parallel-api/) をご覧ください。 最新の PyTorch の Fully Sharded Data Parallel (FSDP) トレーニング機能を統合しました。 必要なのは、設定を通じて有効にすることだけです。 **FSDP サポートに必要な PyTorch バージョン**: PyTorch Nightly (リリース後にこれを読んだ場合は 1.12.0) FSDP を有効にしたモデルの保存は、最近の修正でのみ利用できるためです。 **使用法**: - 配布されたランチャーが追加されていることを確認してください まだ使用していない場合は、`-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE`を使用します。 - **シャーディング戦略**: - FULL_SHARD : データ並列ワーカー/GPU にわたるシャード オプティマイザーの状態 + 勾配 + モデル パラメーター。 このためには、コマンドライン引数に`--fsdp full_shard`を追加します。 - SHARD_GRAD_OP : シャード オプティマイザーの状態 + データ並列ワーカー/GPU 全体の勾配。 このためには、コマンドライン引数に`--fsdp shard_grad_op`を追加します。 - NO_SHARD : シャーディングなし。このためには、コマンドライン引数に`--fsdp no_shard`を追加します。 - パラメータと勾配を CPU にオフロードするには、 コマンドライン引数に`--fsdp "full_shard offload"`または`--fsdp "shard_grad_op offload"`を追加します。 - `default_auto_wrap_policy` を使用して FSDP 
でレイヤーを自動的に再帰的にラップするには、 コマンドライン引数に`--fsdp "full_shard auto_wrap"`または`--fsdp "shard_grad_op auto_wrap"`を追加します。 - CPU オフロードと自動ラッピングの両方を有効にするには、 コマンドライン引数に`--fsdp "full_shard offload auto_wrap"`または`--fsdp "shard_grad_op offload auto_wrap"`を追加します。 - 残りの FSDP 構成は、`--fsdp_config <path_to_fsdp_config.json>`を介して渡されます。それは、次のいずれかの場所です。 FSDP json 構成ファイル (例: `fsdp_config.json`)、またはすでにロードされている json ファイルを `dict` として使用します。 - 自動ラッピングが有効な場合は、トランスベースの自動ラップ ポリシーまたはサイズ ベースの自動ラップ ポリシーを使用できます。 - トランスフォーマーベースの自動ラップポリシーの場合、構成ファイルで `fsdp_transformer_layer_cls_to_wrap` を指定することをお勧めします。指定しない場合、使用可能な場合、デフォルト値は `model._no_split_modules` になります。 これは、ラップするトランスフォーマー層クラス名のリスト (大文字と小文字を区別) を指定します (例: [`BertLayer`]、[`GPTJBlock`]、[`T5Block`] ...)。 重みを共有するサブモジュール (埋め込み層など) が異なる FSDP ラップされたユニットにならないようにする必要があるため、これは重要です。 このポリシーを使用すると、マルチヘッド アテンションとそれに続くいくつかの MLP レイヤーを含むブロックごとにラッピングが発生します。 共有埋め込みを含む残りの層は、同じ最も外側の FSDP ユニットにラップされるのが便利です。 したがって、トランスベースのモデルにはこれを使用してください。 - サイズベースの自動ラップポリシーの場合は、設定ファイルに`fsdp_min_num_params`を追加してください。 自動ラッピングのための FSDP のパラメータの最小数を指定します。 - 設定ファイルで `fsdp_backward_prefetch` を指定できるようになりました。次のパラメータのセットをいつプリフェッチするかを制御します。 `backward_pre` と `backward_post` が利用可能なオプションです。 詳細については、`torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`を参照してください。 - 設定ファイルで `fsdp_forward_prefetch` を指定できるようになりました。次のパラメータのセットをいつプリフェッチするかを制御します。 `True`の場合、FSDP はフォワード パスでの実行中に、次に来るオールギャザーを明示的にプリフェッチします。 - 設定ファイルで `limit_all_gathers` を指定できるようになりました。 `True`の場合、FSDP は CPU スレッドを明示的に同期して、実行中のオールギャザが多すぎるのを防ぎます。 - `activation_checkpointing`を設定ファイルで指定できるようになりました。 `True`の場合、FSDP アクティベーション チェックポイントは、FSDP のアクティベーションをクリアすることでメモリ使用量を削減する手法です。 特定のレイヤーを処理し、バックワード パス中にそれらを再計算します。事実上、これは余分な計算時間を犠牲にします メモリ使用量を削減します。 **注意すべき注意点がいくつかあります** - これは `generate` と互換性がないため、 `--predict_with_generate` とも互換性がありません すべての seq2seq/clm スクリプト (翻訳/要約/clm など)。 問題 [#21667](https://github.com/huggingface/transformers/issues/21667) を参照してください。 ### PyTorch/XLA Fully Sharded Data parallel TPU ユーザーの皆様に朗報です。 PyTorch/XLA は FSDP をサポートするようになりました。 最新の
Fully Sharded Data Parallel (FSDP) トレーニングがすべてサポートされています。 詳細については、[FSDP を使用した Cloud TPU での PyTorch モデルのスケーリング](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/) および [PyTorch/XLA 実装 を参照してください。 FSDP の](https://github.com/pytorch/xla/tree/master/torch_xla/distributed/fsdp) 必要なのは、設定を通じて有効にすることだけです。 **FSDP サポートに必要な PyTorch/XLA バージョン**: >=2.0 **使用法**: `--fsdp "full shard"` を、`--fsdp_config <path_to_fsdp_config.json>` に加えられる次の変更とともに渡します。 - PyTorch/XLA FSDP を有効にするには、`xla`を`True`に設定する必要があります。 - `xla_fsdp_settings` 値は、XLA FSDP ラッピング パラメータを格納する辞書です。 オプションの完全なリストについては、[こちら]( https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_full_sharded_data_Parallel.py)。 - `xla_fsdp_grad_ckpt`。 `True`の場合、ネストされた XLA FSDP でラップされた各レイヤー上で勾配チェックポイントを使用します。 この設定は、xla フラグが true に設定されており、自動ラッピング ポリシーが指定されている場合にのみ使用できます。 `fsdp_min_num_params` または `fsdp_transformer_layer_cls_to_wrap`。 - トランスフォーマー ベースの自動ラップ ポリシーまたはサイズ ベースの自動ラップ ポリシーのいずれかを使用できます。 - トランスフォーマーベースの自動ラップポリシーの場合、構成ファイルで `fsdp_transformer_layer_cls_to_wrap` を指定することをお勧めします。指定しない場合、使用可能な場合、デフォルト値は `model._no_split_modules` になります。 これは、ラップするトランスフォーマー層クラス名のリスト (大文字と小文字を区別) を指定します (例: [`BertLayer`]、[`GPTJBlock`]、[`T5Block`] ...)。 重みを共有するサブモジュール (埋め込み層など) が異なる FSDP ラップされたユニットにならないようにする必要があるため、これは重要です。 このポリシーを使用すると、マルチヘッド アテンションとそれに続くいくつかの MLP レイヤーを含むブロックごとにラッピングが発生します。 共有埋め込みを含む残りの層は、同じ最も外側の FSDP ユニットにラップされるのが便利です。 したがって、トランスベースのモデルにはこれを使用してください。 - サイズベースの自動ラップポリシーの場合は、設定ファイルに`fsdp_min_num_params`を追加してください。 自動ラッピングのための FSDP のパラメータの最小数を指定します。 ### Using Trainer for accelerated PyTorch Training on Mac PyTorch v1.12 リリースにより、開発者と研究者は Apple シリコン GPU を利用してモデル トレーニングを大幅に高速化できます。 これにより、プロトタイピングや微調整などの機械学習ワークフローを Mac 上でローカルで実行できるようになります。 PyTorch のバックエンドとしての Apple の Metal Performance Shaders (MPS) はこれを可能にし、新しい `"mps"` デバイス経由で使用できます。 これにより、計算グラフとプリミティブが MPS Graph フレームワークと MPS によって提供される調整されたカーネルにマッピングされます。 詳細については、公式ドキュメント [Mac での Accelerated PyTorch Training 
の紹介](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/) を参照してください。 および [MPS バックエンド](https://pytorch.org/docs/stable/notes/mps.html)。 <Tip warning={false}> MacOS マシンに PyTorch >= 1.13 (執筆時点ではナイトリー バージョン) をインストールすることを強くお勧めします。 トランスベースのモデルのモデルの正確性とパフォーマンスの向上に関連する主要な修正が行われています。 詳細については、https://github.com/pytorch/pytorch/issues/82707 を参照してください。 </Tip> **Apple Silicon チップを使用したトレーニングと推論の利点** 1. ユーザーがローカルで大規模なネットワークやバッチ サイズをトレーニングできるようにします 2. ユニファイド メモリ アーキテクチャにより、データ取得の遅延が短縮され、GPU がメモリ ストア全体に直接アクセスできるようになります。 したがって、エンドツーエンドのパフォーマンスが向上します。 3. クラウドベースの開発に関連するコストや追加のローカル GPU の必要性を削減します。 **前提条件**: mps サポートを備えたトーチをインストールするには、 この素晴らしいメディア記事 [GPU アクセラレーションが M1 Mac の PyTorch に登場](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1) に従ってください。 。 **使用法**: `mps` デバイスは、`cuda` デバイスが使用される方法と同様に利用可能な場合、デフォルトで使用されます。 したがって、ユーザーによるアクションは必要ありません。 たとえば、以下のコマンドを使用して、Apple Silicon GPU を使用して公式の Glue テキスト分類タスクを (ルート フォルダーから) 実行できます。 ```bash export TASK_NAME=mrpc python examples/pytorch/text-classification/run_glue.py \ --model_name_or_path google-bert/bert-base-cased \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 32 \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ --overwrite_output_dir ``` **注意すべきいくつかの注意事項** 1. 一部の PyTorch 操作は mps に実装されていないため、エラーがスローされます。 これを回避する 1 つの方法は、環境変数 `PYTORCH_ENABLE_MPS_FALLBACK=1` を設定することです。 これらの操作では CPU にフォールバックします。ただし、それでも UserWarning がスローされます。 2. 
分散セットアップ`gloo`および`nccl`は、`mps`デバイスでは動作しません。 これは、現在「mps」デバイス タイプの単一 GPU のみを使用できることを意味します。 最後に、覚えておいてください。 🤗 `Trainer` は MPS バックエンドのみを統合するため、 MPS バックエンドの使用に関して問題や質問がある場合は、 [PyTorch GitHub](https://github.com/pytorch/pytorch/issues) に問題を提出してください。 ## Using Accelerate Launcher with Trainer 加速してトレーナーにパワーを与えましょう。ユーザーが期待することに関しては、次のとおりです。 - トレーナー引数に対して FSDP、DeepSpeed などのトレーナー インテグレーションを変更せずに使用し続けることができます。 - トレーナーで Accelerate Launcher を使用できるようになりました (推奨)。 トレーナーで Accelerate Launcher を使用する手順: 1. 🤗 Accelerate がインストールされていることを確認してください。Accelerate がないと `Trainer` を使用することはできません。そうでない場合は、`pip install accelerate`してください。 Accelerate のバージョンを更新する必要がある場合もあります: `pip install accelerate --upgrade` 2. `accelerate config`を実行し、アンケートに記入します。以下は加速設定の例です。 a. DDP マルチノード マルチ GPU 構成: ```yaml compute_environment: LOCAL_MACHINE distributed_type: MULTI_GPU downcast_bf16: 'no' gpu_ids: all machine_rank: 0 #change rank as per the node main_process_ip: 192.168.20.1 main_process_port: 9898 main_training_function: main mixed_precision: fp16 num_machines: 2 num_processes: 8 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` b. 
FSDP config: ```yaml compute_environment: LOCAL_MACHINE distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_forward_prefetch: true fsdp_offload_params: false fsdp_sharding_strategy: 1 fsdp_state_dict_type: FULL_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` c.ファイルを指す DeepSpeed 構成: ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: /home/user/configs/ds_zero3_config.json zero3_init_flag: true distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` d.加速プラグインを使用した DeepSpeed 構成: ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 0.7 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: true zero_stage: 2 distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` 3. 
加速設定またはランチャー引数によって上記で処理された引数以外の引数を使用して、トレーナー スクリプトを実行します。 以下は、上記の FSDP 構成で`accelerate launcher`を使用して`run_glue.py`を実行する例です。 ```bash cd transformers accelerate launch \ ./examples/pytorch/text-classification/run_glue.py \ --model_name_or_path google-bert/bert-base-cased \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 16 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ --overwrite_output_dir ``` 4. `accelerate launch`するための cmd 引数を直接使用することもできます。上の例は次のようにマッピングされます。 ```bash cd transformers accelerate launch --num_processes=2 \ --use_fsdp \ --mixed_precision=bf16 \ --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \ --fsdp_transformer_layer_cls_to_wrap="BertLayer" \ --fsdp_sharding_strategy=1 \ --fsdp_state_dict_type=FULL_STATE_DICT \ ./examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 16 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ --overwrite_output_dir ``` 詳細については、🤗 Accelerate CLI ガイドを参照してください: [🤗 Accelerate スクリプトの起動](https://huggingface.co/docs/accelerate/basic_tutorials/launch)。 移動されたセクション: [ <a href="./deepspeed#deepspeed-trainer-integration">DeepSpeed</a><a id="deepspeed"></a> | <a href="./deepspeed#deepspeed-installation">Installation</a><a id="installation"></a> | <a href="./deepspeed#deepspeed-multi-gpu">Deployment with multiple GPUs</a><a id="deployment-with-multiple-gpus"></a> | <a href="./deepspeed#deepspeed-one-gpu">Deployment with one GPU</a><a id="deployment-with-one-gpu"></a> | <a href="./deepspeed#deepspeed-notebook">Deployment in Notebooks</a><a id="deployment-in-notebooks"></a> | <a href="./deepspeed#deepspeed-config">Configuration</a><a id="configuration"></a> | <a href="./deepspeed#deepspeed-config-passing">Passing Configuration</a><a id="passing-configuration"></a> | <a 
href="./deepspeed#deepspeed-config-shared">Shared Configuration</a><a id="shared-configuration"></a> | <a href="./deepspeed#deepspeed-zero">ZeRO</a><a id="zero"></a> | <a href="./deepspeed#deepspeed-zero2-config">ZeRO-2 Config</a><a id="zero-2-config"></a> | <a href="./deepspeed#deepspeed-zero3-config">ZeRO-3 Config</a><a id="zero-3-config"></a> | <a href="./deepspeed#deepspeed-nvme">NVMe Support</a><a id="nvme-support"></a> | <a href="./deepspeed#deepspeed-zero2-zero3-performance">ZeRO-2 vs ZeRO-3 Performance</a><a id="zero-2-vs-zero-3-performance"></a> | <a href="./deepspeed#deepspeed-zero2-example">ZeRO-2 Example</a><a id="zero-2-example"></a> | <a href="./deepspeed#deepspeed-zero3-example">ZeRO-3 Example</a><a id="zero-3-example"></a> | <a href="./deepspeed#deepspeed-optimizer">Optimizer</a><a id="optimizer"></a> | <a href="./deepspeed#deepspeed-scheduler">Scheduler</a><a id="scheduler"></a> | <a href="./deepspeed#deepspeed-fp32">fp32 Precision</a><a id="fp32-precision"></a> | <a href="./deepspeed#deepspeed-amp">Automatic Mixed Precision</a><a id="automatic-mixed-precision"></a> | <a href="./deepspeed#deepspeed-bs">Batch Size</a><a id="batch-size"></a> | <a href="./deepspeed#deepspeed-grad-acc">Gradient Accumulation</a><a id="gradient-accumulation"></a> | <a href="./deepspeed#deepspeed-grad-clip">Gradient Clipping</a><a id="gradient-clipping"></a> | <a href="./deepspeed#deepspeed-weight-extraction">Getting The Model Weights Out</a><a id="getting-the-model-weights-out"></a> ]
transformers/docs/source/ja/main_classes/trainer.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/trainer.md", "repo_id": "transformers", "token_count": 19572 }
312
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BigBird ## Overview BigBird モデルは、[Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) で提案されました。 ザヒール、マンジルとグルガネシュ、グルとダベイ、クマール・アヴィナヴァとエインズリー、ジョシュアとアルベルティ、クリスとオンタノン、 サンティアゴとファム、フィリップとラブラ、アニルードとワン、キーファンとヤン、リーなど。 BigBird は注目度が低い BERT などの Transformer ベースのモデルをさらに長いシーケンスに拡張する、Transformer ベースのモデル。まばらに加えて アテンションと同様に、BigBird は入力シーケンスにランダム アテンションだけでなくグローバル アテンションも適用します。理論的には、 まばらで全体的でランダムな注意を適用すると、完全な注意に近づくことが示されていますが、 長いシーケンスでは計算効率が大幅に向上します。より長いコンテキストを処理できる機能の結果として、 BigBird は、質問応答や BERT または RoBERTa と比較した要約。 論文の要約は次のとおりです。 *BERT などのトランスフォーマーベースのモデルは、NLP で最も成功した深層学習モデルの 1 つです。 残念ながら、それらの中核的な制限の 1 つは、シーケンスに対する二次依存性 (主にメモリに関する) です。 完全な注意メカニズムによる長さです。これを解決するために、BigBird は、まばらな注意メカニズムを提案します。 この二次依存関係を線形に削減します。 BigBird がシーケンス関数の汎用近似器であることを示します。 チューリングは完全であるため、二次完全注意モデルのこれらの特性が保存されます。途中、私たちの 理論分析により、O(1) 個のグローバル トークン (CLS など) を持つ利点の一部が明らかになり、 スパース注意メカニズムの一部としてのシーケンス。提案されたスパース アテンションは、次の長さのシーケンスを処理できます。 同様のハードウェアを使用して以前に可能であったものの 8 倍。より長いコンテキストを処理できる機能の結果として、 BigBird は、質問応答や要約などのさまざまな NLP タスクのパフォーマンスを大幅に向上させます。私達も ゲノミクスデータへの新しいアプリケーションを提案します。* チップ: - BigBird の注意がどのように機能するかについての詳細な説明については、[このブログ投稿](https://huggingface.co/blog/big-bird) を参照してください。 - BigBird には、**original_full** と **block_sparse** の 2 つの実装が付属しています。シーケンス長が 1024 
未満の場合、次を使用します。 **block_sparse** を使用してもメリットがないため、**original_full** を使用することをお勧めします。 - コードは現在、3 ブロックと 2 グローバル ブロックのウィンドウ サイズを使用しています。 - シーケンスの長さはブロック サイズで割り切れる必要があります。 - 現在の実装では **ITC** のみがサポートされています。 - 現在の実装では **num_random_blocks = 0** はサポートされていません - BigBird は絶対位置埋め込みを備えたモデルであるため、通常は入力を右側にパディングすることをお勧めします。 左。 このモデルは、[vasudevgupta](https://huggingface.co/vasudevgupta) によって提供されました。元のコードが見つかる [こちら](https://github.com/google-research/bigbird)。 ## ドキュメント リソース - [テキスト分類タスクガイド](../tasks/sequence_classification) - [トークン分類タスクガイド](../tasks/token_classification) - [質問回答タスク ガイド](../tasks/question_answering) - [因果言語モデリング タスク ガイド](../tasks/language_modeling) - [マスクされた言語モデリング タスク ガイド](../tasks/masked_language_modeling) - [多肢選択タスク ガイド](../tasks/multiple_choice) ## BigBirdConfig [[autodoc]] BigBirdConfig ## BigBirdTokenizer [[autodoc]] BigBirdTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## BigBirdTokenizerFast [[autodoc]] BigBirdTokenizerFast ## BigBird specific outputs [[autodoc]] models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput <frameworkcontent> <pt> ## BigBirdModel [[autodoc]] BigBirdModel - forward ## BigBirdForPreTraining [[autodoc]] BigBirdForPreTraining - forward ## BigBirdForCausalLM [[autodoc]] BigBirdForCausalLM - forward ## BigBirdForMaskedLM [[autodoc]] BigBirdForMaskedLM - forward ## BigBirdForSequenceClassification [[autodoc]] BigBirdForSequenceClassification - forward ## BigBirdForMultipleChoice [[autodoc]] BigBirdForMultipleChoice - forward ## BigBirdForTokenClassification [[autodoc]] BigBirdForTokenClassification - forward ## BigBirdForQuestionAnswering [[autodoc]] BigBirdForQuestionAnswering - forward </pt> <jax> ## FlaxBigBirdModel [[autodoc]] FlaxBigBirdModel - __call__ ## FlaxBigBirdForPreTraining [[autodoc]] FlaxBigBirdForPreTraining - __call__ ## FlaxBigBirdForCausalLM [[autodoc]] FlaxBigBirdForCausalLM - __call__ ## FlaxBigBirdForMaskedLM [[autodoc]] 
FlaxBigBirdForMaskedLM - __call__ ## FlaxBigBirdForSequenceClassification [[autodoc]] FlaxBigBirdForSequenceClassification - __call__ ## FlaxBigBirdForMultipleChoice [[autodoc]] FlaxBigBirdForMultipleChoice - __call__ ## FlaxBigBirdForTokenClassification [[autodoc]] FlaxBigBirdForTokenClassification - __call__ ## FlaxBigBirdForQuestionAnswering [[autodoc]] FlaxBigBirdForQuestionAnswering - __call__ </jax> </frameworkcontent>
transformers/docs/source/ja/model_doc/big_bird.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/big_bird.md", "repo_id": "transformers", "token_count": 2762 }
313
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CLAP ## Overview CLAP モデルは、[Large Scale Contrastive Language-Audio pretraining with feature fusion and keyword-to-caption augmentation](https://arxiv.org/pdf/2211.06687.pdf)、Yusong Wu、Ke Chen、Tianyu Zhang、Yuchen Hui、Taylor Berg-Kirkpatrick、Shlomo Dubnov 著。 CLAP (Contrastive Language-Audio Pretraining) は、さまざまな (音声、テキスト) ペアでトレーニングされたニューラル ネットワークです。タスクに合わせて直接最適化することなく、音声が与えられた場合に最も関連性の高いテキスト スニペットを予測するように指示できます。 CLAP モデルは、SWINTransformer を使用して log-Mel スペクトログラム入力からオーディオ特徴を取得し、RoBERTa モデルを使用してテキスト特徴を取得します。次に、テキストとオーディオの両方の特徴が、同じ次元の潜在空間に投影されます。投影されたオーディオとテキストの特徴の間のドット積が、同様のスコアとして使用されます。 論文の要約は次のとおりです。 *対照学習は、マルチモーダル表現学習の分野で目覚ましい成功を収めています。この論文では、音声データと自然言語記述を組み合わせて音声表現を開発する、対照的な言語音声事前トレーニングのパイプラインを提案します。この目標を達成するために、私たちはまず、さまざまなデータ ソースからの 633,526 個の音声とテキストのペアの大規模なコレクションである LAION-Audio-630K をリリースします。次に、さまざまなオーディオ エンコーダとテキスト エンコーダを考慮して、対照的な言語とオーディオの事前トレーニング モデルを構築します。機能融合メカニズムとキーワードからキャプションへの拡張をモデル設計に組み込んで、モデルが可変長の音声入力を処理できるようにし、パフォーマンスを向上させます。 3 番目に、包括的な実験を実行して、テキストから音声への取得、ゼロショット音声分類、教師付き音声分類の 3 つのタスクにわたってモデルを評価します。結果は、私たちのモデルがテキストから音声への検索タスクにおいて優れたパフォーマンスを達成していることを示しています。オーディオ分類タスクでは、モデルはゼロショット設定で最先端のパフォーマンスを達成し、非ゼロショット設定でもモデルの結果に匹敵するパフォーマンスを得ることができます。 LAION-オーディオ-6* このモデルは、[Younes Belkada](https://huggingface.co/ybelkada) および [Arthur 
Zucker](https://huggingface.co/ArthurZ) によって提供されました。 元のコードは [こちら](https://github.com/LAION-AI/Clap) にあります。 ## ClapConfig [[autodoc]] ClapConfig - from_text_audio_configs ## ClapTextConfig [[autodoc]] ClapTextConfig ## ClapAudioConfig [[autodoc]] ClapAudioConfig ## ClapFeatureExtractor [[autodoc]] ClapFeatureExtractor ## ClapProcessor [[autodoc]] ClapProcessor ## ClapModel [[autodoc]] ClapModel - forward - get_text_features - get_audio_features ## ClapTextModel [[autodoc]] ClapTextModel - forward ## ClapTextModelWithProjection [[autodoc]] ClapTextModelWithProjection - forward ## ClapAudioModel [[autodoc]] ClapAudioModel - forward ## ClapAudioModelWithProjection [[autodoc]] ClapAudioModelWithProjection - forward
transformers/docs/source/ja/model_doc/clap.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/clap.md", "repo_id": "transformers", "token_count": 1775 }
314
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeBERTa ## Overview DeBERTa モデルは、Pengcheng He、Xiaodong Liu、Jianfeng Gao、Weizhu Chen によって [DeBERTa: Decoding-enhanced BERT with Disentangled Attendant](https://arxiv.org/abs/2006.03654) で提案されました。Google のモデルに基づいています。 2018年にリリースされたBERTモデルと2019年にリリースされたFacebookのRoBERTaモデル。 これは、もつれた注意を解きほぐし、使用されるデータの半分を使用して強化されたマスク デコーダ トレーニングを備えた RoBERTa に基づいて構築されています。 ロベルタ。 論文の要約は次のとおりです。 *事前トレーニングされたニューラル言語モデルの最近の進歩により、多くの自然言語モデルのパフォーマンスが大幅に向上しました。 言語処理 (NLP) タスク。この論文では、新しいモデル アーキテクチャ DeBERTa (Decoding-enhanced BERT with これは、2 つの新しい技術を使用して BERT モデルと RoBERTa モデルを改善します。 1つ目は、 もつれを解く注意メカニズム。各単語は、その内容をエンコードする 2 つのベクトルを使用して表現され、 単語間の注意の重みは、それらの単語のもつれ解除行列を使用して計算されます。 内容と相対的な位置。 2 番目に、強化されたマスク デコーダを使用して、出力ソフトマックス レイヤを次のように置き換えます。 モデルの事前トレーニング用にマスクされたトークンを予測します。これら 2 つの手法により効率が大幅に向上することを示します。 モデルの事前トレーニングと下流タスクのパフォーマンスの向上。 RoBERTa-Large と比較すると、DeBERTa モデルは半分のレベルでトレーニングされています。 トレーニング データは幅広い NLP タスクで一貫して優れたパフォーマンスを示し、MNLI で +0.9% の改善を達成しました。 (90.2% 対 91.1%)、SQuAD v2.0 では +2.3% (88.4% 対 90.7%)、RACE では +3.6% (83.2% 対 86.8%) でした。 DeBERTa コードと 事前トレーニングされたモデルは https://github.com/microsoft/DeBERTa で公開されます。* このモデルは [DeBERTa](https://huggingface.co/DeBERTa) によって寄稿されました。このモデルの TF 2.0 実装は、 [kamalkraj](https://huggingface.co/kamalkraj) による寄稿。元のコードは 
[こちら](https://github.com/microsoft/DeBERTa) にあります。 ## Resources DeBERTa を使い始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示される) リソースのリスト。ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 <PipelineTag pipeline="text-classification"/> - DeBERTa を使用して [DeepSpeed を使用して大規模モデルのトレーニングを加速する](https://huggingface.co/blog/accelerate-deepspeed) 方法に関するブログ投稿。 - DeBERTa による [機械学習によるスーパーチャージされた顧客サービス](https://huggingface.co/blog/supercharge-customer-service-with-machine-learning) に関するブログ投稿。 - [`DebertaForSequenceClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)。 - [`TFDebertaForSequenceClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)。 - [テキスト分類タスクガイド](../tasks/sequence_classification) <PipelineTag pipeline="token-classification" /> - [`DebertaForTokenClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)。 - [`TFDebertaForTokenClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)。 - [トークン分類](https://huggingface.co/course/chapter7/2?fw=pt) 🤗 ハグフェイスコースの章。 - 🤗 ハグフェイスコースの [バイトペアエンコーディングのトークン化](https://huggingface.co/course/chapter6/5?fw=pt) の章。 - [トークン分類タスクガイド](../tasks/token_classification) <PipelineTag pipeline="fill-mask"/> - [`DebertaForMaskedLM`] は、この 
[サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) でサポートされています。 [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)。 - [`TFDebertaForMaskedLM`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/lang-modeling#run_mlmpy) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)。 - [マスクされた言語モデリング](https://huggingface.co/course/chapter7/3?fw=pt) 🤗 顔のハグ コースの章。 - [マスク言語モデリング タスク ガイド](../tasks/masked_language_modeling) <PipelineTag pipeline="question-answering"/> - [`DebertaForQuestionAnswering`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)。 - [`TFDebertaForQuestionAnswering`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)。 - [質問回答](https://huggingface.co/course/chapter7/7?fw=pt) 🤗 ハグフェイスコースの章。 - [質問回答タスク ガイド](../tasks/question_answering) ## DebertaConfig [[autodoc]] DebertaConfig ## DebertaTokenizer [[autodoc]] DebertaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## DebertaTokenizerFast [[autodoc]] DebertaTokenizerFast - build_inputs_with_special_tokens - create_token_type_ids_from_sequences <frameworkcontent> <pt> ## DebertaModel [[autodoc]] DebertaModel - forward ## DebertaPreTrainedModel [[autodoc]] DebertaPreTrainedModel ## DebertaForMaskedLM [[autodoc]] DebertaForMaskedLM - forward ## DebertaForSequenceClassification [[autodoc]] 
DebertaForSequenceClassification - forward ## DebertaForTokenClassification [[autodoc]] DebertaForTokenClassification - forward ## DebertaForQuestionAnswering [[autodoc]] DebertaForQuestionAnswering - forward </pt> <tf> ## TFDebertaModel [[autodoc]] TFDebertaModel - call ## TFDebertaPreTrainedModel [[autodoc]] TFDebertaPreTrainedModel - call ## TFDebertaForMaskedLM [[autodoc]] TFDebertaForMaskedLM - call ## TFDebertaForSequenceClassification [[autodoc]] TFDebertaForSequenceClassification - call ## TFDebertaForTokenClassification [[autodoc]] TFDebertaForTokenClassification - call ## TFDebertaForQuestionAnswering [[autodoc]] TFDebertaForQuestionAnswering - call </tf> </frameworkcontent>
transformers/docs/source/ja/model_doc/deberta.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/deberta.md", "repo_id": "transformers", "token_count": 3598 }
315
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Efficient Inference on CPU このガイドは、CPU上で大規模なモデルの効率的な推論に焦点を当てています。 ## `BetterTransformer` for faster inference 最近、テキスト、画像、および音声モデルのCPU上での高速な推論のために`BetterTransformer`を統合しました。詳細については、この統合に関するドキュメンテーションを[こちら](https://huggingface.co/docs/optimum/bettertransformer/overview)で確認してください。 ## PyTorch JITモード(TorchScript) TorchScriptは、PyTorchコードからシリアライズ可能で最適化可能なモデルを作成する方法です。任意のTorchScriptプログラムは、Python依存性のないプロセスで保存およびロードできます。 デフォルトのイーガーモードと比較して、PyTorchのjitモードは通常、オペレーターフュージョンなどの最適化手法によりモデル推論のパフォーマンスが向上します。 TorchScriptの簡単な紹介については、[PyTorch TorchScriptチュートリアル](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#tracing-modules)を参照してください。 ### JITモードでのIPEXグラフ最適化 Intel® Extension for PyTorchは、Transformersシリーズモデルのjitモードにさらなる最適化を提供します。Intel® Extension for PyTorchをjitモードで使用することを強くお勧めします。Transformersモデルからよく使用されるオペレーターパターンのいくつかは、既にIntel® Extension for PyTorchでjitモードのフュージョンに対応しています。これらのフュージョンパターン(Multi-head-attentionフュージョン、Concat Linear、Linear+Add、Linear+Gelu、Add+LayerNormフュージョンなど)は有効でパフォーマンスが良いです。フュージョンの利点は、ユーザーに透過的に提供されます。分析によれば、最も人気のある質問応答、テキスト分類、トークン分類のNLPタスクの約70%が、これらのフュージョンパターンを使用してFloat32精度とBFloat16混合精度の両方でパフォーマンスの利点を得ることができます。 [IPEXグラフ最適化の詳細情報](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html)を確認してください。 #### IPEX installation: 
IPEXのリリースはPyTorchに従っています。[IPEXのインストール方法](https://intel.github.io/intel-extension-for-pytorch/)を確認してください。 ### Usage of JIT-mode Trainerで評価または予測のためにJITモードを有効にするには、ユーザーはTrainerコマンド引数に`jit_mode_eval`を追加する必要があります。 <Tip warning={true}> PyTorch >= 1.14.0の場合、jitモードはjit.traceでdict入力がサポートされているため、予測と評価に任意のモデルに利益をもたらす可能性があります。 PyTorch < 1.14.0の場合、jitモードはforwardパラメーターの順序がjit.traceのタプル入力の順序と一致するモデルに利益をもたらす可能性があります(質問応答モデルなど)。jit.traceがタプル入力の順序と一致しない場合、テキスト分類モデルなど、jit.traceは失敗し、これをフォールバックさせるために例外でキャッチしています。ログはユーザーに通知するために使用されます。 </Tip> [Transformers質問応答の使用例](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)を参考にしてください。 - Inference using jit mode on CPU: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--jit_mode_eval </b></pre> - Inference with IPEX using jit mode on CPU: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--use_ipex \</b> <b>--jit_mode_eval</b></pre>
transformers/docs/source/ja/perf_infer_cpu.md/0
{ "file_path": "transformers/docs/source/ja/perf_infer_cpu.md", "repo_id": "transformers", "token_count": 1977 }
316
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Webサーバー用のパイプラインの使用 <Tip> 推論エンジンの作成は複雑なトピックであり、"最適な"ソリューションはおそらく問題の領域に依存するでしょう。CPUまたはGPUを使用していますか?最低のレイテンシ、最高のスループット、多くのモデルのサポート、または特定のモデルの高度な最適化を望んでいますか? このトピックに取り組むための多くの方法があり、私たちが紹介するのは、おそらく最適なソリューションではないかもしれないが、始めるための良いデフォルトです。 </Tip> 重要なことは、Webサーバーはリクエストを待機し、受信したように扱うシステムであるため、[データセット](pipeline_tutorial#using-pipelines-on-a-dataset)のように、イテレータを使用できることです。 通常、Webサーバーは並列処理(マルチスレッド、非同期など)されて、さまざまなリクエストを同時に処理します。一方、パイプライン(および主にその基礎となるモデル)は並列処理にはあまり適していません。それらは多くのRAMを使用するため、実行中に利用可能なリソースをすべて提供するか、計算集約型のジョブである場合に最適です。 Webサーバーは受信と送信の軽い負荷を処理し、実際の作業を1つのスレッドで処理するようにします。この例では`starlette`を使用します。実際のフレームワークはあまり重要ではありませんが、別のフレームワークを使用している場合は、同じ効果を得るためにコードを調整または変更する必要があるかもしれません。 `server.py`を作成してください: ```py from starlette.applications import Starlette from starlette.responses import JSONResponse from starlette.routing import Route from transformers import pipeline import asyncio async def homepage(request): payload = await request.body() string = payload.decode("utf-8") response_q = asyncio.Queue() await request.app.model_queue.put((string, response_q)) output = await response_q.get() return JSONResponse(output) async def server_loop(q): pipe = pipeline(model="google-bert/bert-base-uncased") while True: (string, response_q) = await q.get() out = pipe(string) await response_q.put(out) app = Starlette( routes=[ Route("/", homepage, methods=["POST"]), ], ) @app.on_event("startup") async def startup_event(): q = asyncio.Queue() app.model_queue = q asyncio.create_task(server_loop(q)) ``` ここから始めることができます: ```bash uvicorn server:app ``` そして、次のようにクエリできます: ```bash curl -X POST -d "test [MASK]" http://localhost:8000/ #[{"score":0.7742936015129089,"token":1012,"token_str":".","sequence":"test."},...] ``` そして、これでウェブサーバーを作成する方法の良いアイデアを持っています! 
本当に重要なのは、モデルを**一度だけ**ロードすることです。これにより、ウェブサーバー上にモデルのコピーがないため、不必要なRAMが使用されなくなります。 その後、キューイングメカニズムを使用して、動的バッチ処理を行うなど、いくつかのアイテムを蓄積してから推論を行うなど、高度な処理を行うことができます: <Tip warning={true}> 以下のコードサンプルは、可読性のために擬似コードのように書かれています。システムリソースに合理的かどうかを確認せずに実行しないでください! </Tip> ```py (string, rq) = await q.get() strings = [] queues = [] while True: try: (string, rq) = await asyncio.wait_for(q.get(), timeout=0.001) # 1ms except asyncio.exceptions.TimeoutError: break strings.append(string) queues.append(rq) strings outs = pipe(strings, batch_size=len(strings)) for rq, out in zip(queues, outs): await rq.put(out) ``` まず第一に、通常はあまり良いアイデアではないバッチサイズの制限がありません。次に、タイムアウトはキューの取得ごとにリセットされるため、推論を実行する前に1ms以上待つ可能性があります(最初のリクエストの遅延に1ms分遅れが生じます)。 1msの締め切りを1回だけ持つのが良いでしょう。 これは、キューに何もない場合でも常に1ms待機しますが、キューに何もない場合に推論を開始したい場合は適していないかもしれません。ただし、バッチ処理が本当に重要な場合には意味があるかもしれません。再度、1つの最適な解決策は存在しません。 ## Few things you might want to consider ### Error checking 本番環境では多くの問題が発生する可能性があります:メモリ不足、スペース不足、モデルの読み込みが失敗するかもしれません、クエリが誤っているかもしれません、クエリが正しい場合でもモデルの構成エラーのために実行に失敗するかもしれませんなど。 一般的には、サーバーがエラーをユーザーに出力すると良いため、これらのエラーを表示するための多くの`try..except`ステートメントを追加することは良いアイデアです。ただし、セキュリティコンテキストに応じてこれらのエラーをすべて表示することはセキュリティリスクになる可能性があることに注意してください。 ### Circuit breaking Webサーバーは通常、過負荷時に正しいエラーを返す方が良いです。クエリを無期限に待つ代わりに適切なエラーを返します。長時間待つ代わりに503エラーを返すか、長時間待ってから504エラーを返すかです。 提案されたコードでは単一のキューがあるため、キューサイズを見ることは、Webサーバーが負荷に耐える前にエラーを返すための基本的な方法です。 ### Blocking the main thread 現在、PyTorchは非同期を認識していないため、計算はメインスレッドをブロックします。つまり、PyTorchが独自のスレッド/プロセスで実行されるようにすると良いでしょう。提案されたコードは、スレッドと非同期とキューがうまく連携しないため、これは行われていませんが、最終的には同じことを行います。 これは、単一のアイテムの推論が長い場合(>1秒)に重要です。この場合、推論中にすべてのクエリが1秒待たなければならないことを意味します。 ### Dynamic batching 一般的に、バッチ処理は1回のアイテムを1回渡すよりも改善されることは必ずしもありません(詳細は[バッチ処理の詳細](./main_classes/pipelines#pipeline-batching)を参照)。しかし、正しい設定で使用すると非常に効果的です。APIではデフォルトで動的バッチ処理は行われません(遅延の機会が多すぎます)。しかし、非常に大規模なモデルであるBLOOM推論の場合、動的バッチ処理は**重要**です。これにより、すべてのユーザーにとってまともなエクスペリエンスを提供できます。 以上が、提供されたテキストのMarkdown形式の翻訳です。
transformers/docs/source/ja/pipeline_webserver.md/0
{ "file_path": "transformers/docs/source/ja/pipeline_webserver.md", "repo_id": "transformers", "token_count": 3402 }
317
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Masked language modeling [[open-in-colab]] <Youtube id="mqElG5QJWUg"/> マスクされた言語モデリングはシーケンス内のマスクされたトークンを予測し、モデルはトークンを双方向に処理できます。これは、モデルが左右のトークンに完全にアクセスできることを意味します。マスクされた言語モデリングは、シーケンス全体の文脈をよく理解する必要がある次のようなタスクに最適です。 BERT はマスクされた言語モデルの一例です。 このガイドでは、次の方法を説明します。 1. [ELI5](https://huggingface.co/datasets/eli5) データセットの [r/askscience](https://www.reddit.com/r/askscience/) サブセットで [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) を微調整します。 2. 
微調整したモデルを推論に使用します。 <Tip> このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/fill-mask) を確認することをお勧めします。 </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install transformers datasets evaluate ``` モデルをアップロードしてコミュニティと共有できるように、Hugging Face アカウントにログインすることをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load ELI5 dataset まず、ELI5 データセットの r/askscience サブセットの小さいサブセットを 🤗 データセット ライブラリからロードします。これで データセット全体のトレーニングにさらに時間を費やす前に、実験してすべてが機能することを確認する機会が与えられます。 ```py >>> from datasets import load_dataset >>> eli5 = load_dataset("eli5", split="train_asks[:5000]") ``` [`~datasets.Dataset.train_test_split`] メソッドを使用して、データセットの `train_asks` をトレイン セットとテスト セットに分割します。 ```py >>> eli5 = eli5.train_test_split(test_size=0.2) ``` 次に、例を見てみましょう。 ```py >>> eli5["train"][0] {'answers': {'a_id': ['c3d1aib', 'c3d4lya'], 'score': [6, 3], 'text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"]}, 'answers_urls': {'url': []}, 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls': {'url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg']}, 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls': {'url': []}} ``` これは多くのことのように見えるかもしれませんが、実際に関心があるのは`text`フィールドだけです。言語モデリング タスクの優れた点は、次の単語がラベル * であるため、ラベル (教師なしタスクとも呼ばれます) が必要ないことです。 ## Preprocess <Youtube id="8PmhEIXhBvI"/> マスクされた言語モデリングの場合、次のステップは、`text`サブフィールドを処理するために DistilRoBERTa トークナイザーをロードすることです。 ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilroberta-base") ``` 上の例からわかるように、`text`フィールドは実際には`answers`内にネストされています。これは、次のことを行う必要があることを意味します [` flatten`](https://huggingface.co/docs/datasets/process.html#flatten) メソッドを使用して、ネストされた構造から `text` サブフィールドを抽出します。 ```py >>> eli5 = eli5.flatten() >>> eli5["train"][0] {'answers.a_id': ['c3d1aib', 'c3d4lya'], 'answers.score': [6, 3], 'answers.text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"], 'answers_urls.url': [], 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? 
Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls.url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg'], 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls.url': []} ``` `answers`接頭辞で示されるように、各サブフィールドは個別の列になり、`text`フィールドはリストになりました。その代わり 各文を個別にトークン化する場合は、リストを文字列に変換して、それらをまとめてトークン化できるようにします。 以下は、各例の文字列のリストを結合し、結果をトークン化する最初の前処理関数です。 ```py >>> def preprocess_function(examples): ... return tokenizer([" ".join(x) for x in examples["answers.text"]]) ``` この前処理関数をデータセット全体に適用するには、🤗 Datasets [`~datasets.Dataset.map`] メソッドを使用します。 `map` 関数を高速化するには、`batched=True` を設定してデータセットの複数の要素を一度に処理し、`num_proc` でプロセスの数を増やします。不要な列を削除します。 ```py >>> tokenized_eli5 = eli5.map( ... preprocess_function, ... batched=True, ... num_proc=4, ... remove_columns=eli5["train"].column_names, ... ) ``` このデータセットにはトークン シーケンスが含まれていますが、その一部はモデルの最大入力長よりも長くなります。 2 番目の前処理関数を使用して、 - すべてのシーケンスを連結します - 連結されたシーケンスを`block_size`で定義された短いチャンクに分割します。これは、最大入力長より短く、GPU RAM に十分な長さである必要があります。 ```py >>> block_size = 128 >>> def group_texts(examples): ... # Concatenate all texts. ... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} ... total_length = len(concatenated_examples[list(examples.keys())[0]]) ... # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can ... # customize this part to your needs. ... if total_length >= block_size: ... total_length = (total_length // block_size) * block_size ... # Split by chunks of block_size. ... result = { ... k: [t[i : i + block_size] for i in range(0, total_length, block_size)] ... for k, t in concatenated_examples.items() ... } ... 
return result ``` データセット全体に`group_texts`関数を適用します。 ```py >>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4) ``` 次に、[`DataCollat​​orForLanguageModeling`] を使用してサンプルのバッチを作成します。データセット全体を最大長までパディングするのではなく、照合中にバッチ内の最長の長さまで文を *動的にパディング* する方が効率的です。 <frameworkcontent> <pt> シーケンス終了トークンをパディング トークンとして使用し、データを反復するたびにランダムにトークンをマスクするために `mlm_probability` を指定します。 ```py >>> from transformers import DataCollatorForLanguageModeling >>> tokenizer.pad_token = tokenizer.eos_token >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15) ``` </pt> <tf> シーケンス終了トークンをパディング トークンとして使用し、データを反復するたびにランダムにトークンをマスクするために `mlm_probability` を指定します。 ```py >>> from transformers import DataCollatorForLanguageModeling >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="tf") ``` </tf> </frameworkcontent> ## Train <frameworkcontent> <pt> <Tip> [`Trainer`] を使用したモデルの微調整に慣れていない場合は、[ここ](../training#train-with-pytorch-trainer) の基本的なチュートリアルをご覧ください。 </Tip> これでモデルのトレーニングを開始する準備が整いました。 [`AutoModelForMaskedLM`] を使用して DistilRoBERTa をロードします。 ```py >>> from transformers import AutoModelForMaskedLM >>> model = AutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base") ``` この時点で残っている手順は次の 3 つだけです。 1. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。唯一の必須パラメータは、モデルの保存場所を指定する `output_dir` です。 `push_to_hub=True`を設定して、このモデルをハブにプッシュします (モデルをアップロードするには、Hugging Face にサインインする必要があります)。 2. トレーニング引数をモデル、データセット、データ照合器とともに [`Trainer`] に渡します。 3. [`~Trainer.train`] を呼び出してモデルを微調整します。 ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_eli5_mlm_model", ... eval_strategy="epoch", ... learning_rate=2e-5, ... num_train_epochs=3, ... weight_decay=0.01, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=lm_dataset["train"], ... eval_dataset=lm_dataset["test"], ... data_collator=data_collator, ... 
) >>> trainer.train() ``` トレーニングが完了したら、 [`~transformers.Trainer.evaluate`] メソッドを使用してモデルを評価し、その複雑さを取得します。 ```py >>> import math >>> eval_results = trainer.evaluate() >>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}") Perplexity: 8.76 ``` 次に、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。 ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> Keras を使用したモデルの微調整に慣れていない場合は、[こちら](../training#train-a-tensorflow-model-with-keras) の基本的なチュートリアルをご覧ください。 </Tip> TensorFlow でモデルを微調整するには、オプティマイザー関数、学習率スケジュール、およびいくつかのトレーニング ハイパーパラメーターをセットアップすることから始めます。 ```py >>> from transformers import create_optimizer, AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` 次に、[`TFAutoModelForMaskedLM`] を使用して DistilRoBERTa をロードできます。 ```py >>> from transformers import TFAutoModelForMaskedLM >>> model = TFAutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base") ``` [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] を使用して、データセットを `tf.data.Dataset` 形式に変換します。 ```py >>> tf_train_set = model.prepare_tf_dataset( ... lm_dataset["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = model.prepare_tf_dataset( ... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) を使用してトレーニング用のモデルを設定します。 Transformers モデルにはすべてデフォルトのタスク関連の損失関数があるため、次の場合を除き、損失関数を指定する必要はないことに注意してください。 ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) # No loss argument! ``` This can be done by specifying where to push your model and tokenizer in the [`~transformers.PushToHubCallback`]: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> callback = PushToHubCallback( ... output_dir="my_awesome_eli5_mlm_model", ... tokenizer=tokenizer, ... 
) ``` ついに、モデルのトレーニングを開始する準備が整いました。トレーニングおよび検証データセット、エポック数、コールバックを指定して [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) を呼び出し、モデルを微調整します。 ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback]) ``` トレーニングが完了すると、モデルは自動的にハブにアップロードされ、誰でも使用できるようになります。 </tf> </frameworkcontent> <Tip> マスクされた言語モデリング用にモデルを微調整する方法のより詳細な例については、対応するドキュメントを参照してください。 [PyTorch ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) または [TensorFlow ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)。 </Tip> ## Inference モデルを微調整したので、それを推論に使用できるようになりました。 モデルに空白を埋めるテキストを考え出し、特別な `<mask>` トークンを使用して空白を示します。 ```py >>> text = "The Milky Way is a <mask> galaxy." ``` 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。モデルを使用してフィルマスクの`pipeline`をインスタンス化し、テキストをそれに渡します。必要に応じて、`top_k`パラメータを使用して、返す予測の数を指定できます。 ```py >>> from transformers import pipeline >>> mask_filler = pipeline("fill-mask", "stevhliu/my_awesome_eli5_mlm_model") >>> mask_filler(text, top_k=3) [{'score': 0.5150994658470154, 'token': 21300, 'token_str': ' spiral', 'sequence': 'The Milky Way is a spiral galaxy.'}, {'score': 0.07087188959121704, 'token': 2232, 'token_str': ' massive', 'sequence': 'The Milky Way is a massive galaxy.'}, {'score': 0.06434620916843414, 'token': 650, 'token_str': ' small', 'sequence': 'The Milky Way is a small galaxy.'}] ``` <frameworkcontent> <pt> テキストをトークン化し、`input_ids`を PyTorch テンソルとして返します。 `<mask>` トークンの位置も指定する必要があります。 ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_eli5_mlm_model") >>> inputs = tokenizer(text, return_tensors="pt") >>> mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1] ``` 入力をモデルに渡し、マスクされたトークンの`logits`を返します。 ```py >>> from transformers import AutoModelForMaskedLM >>> model = 
AutoModelForMaskedLM.from_pretrained("stevhliu/my_awesome_eli5_mlm_model") >>> logits = model(**inputs).logits >>> mask_token_logits = logits[0, mask_token_index, :] ``` 次に、マスクされた 3 つのトークンを最も高い確率で返し、出力します。 ```py >>> top_3_tokens = torch.topk(mask_token_logits, 3, dim=1).indices[0].tolist() >>> for token in top_3_tokens: ... print(text.replace(tokenizer.mask_token, tokenizer.decode([token]))) The Milky Way is a spiral galaxy. The Milky Way is a massive galaxy. The Milky Way is a small galaxy. ``` </pt> <tf> テキストをトークン化し、`input_ids`を TensorFlow テンソルとして返します。 `<mask>` トークンの位置も指定する必要があります。 ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_eli5_mlm_model") >>> inputs = tokenizer(text, return_tensors="tf") >>> mask_token_index = tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1] ``` 入力をモデルに渡し、マスクされたトークンの`logits`を返します。 ```py >>> from transformers import TFAutoModelForMaskedLM >>> model = TFAutoModelForMaskedLM.from_pretrained("stevhliu/my_awesome_eli5_mlm_model") >>> logits = model(**inputs).logits >>> mask_token_logits = logits[0, mask_token_index, :] ``` 次に、マスクされた 3 つのトークンを最も高い確率で返し、出力します。 ```py >>> top_3_tokens = tf.math.top_k(mask_token_logits, 3).indices.numpy() >>> for token in top_3_tokens: ... print(text.replace(tokenizer.mask_token, tokenizer.decode([token]))) The Milky Way is a spiral galaxy. The Milky Way is a massive galaxy. The Milky Way is a small galaxy. ``` </tf> </frameworkcontent>
transformers/docs/source/ja/tasks/masked_language_modeling.md/0
{ "file_path": "transformers/docs/source/ja/tasks/masked_language_modeling.md", "repo_id": "transformers", "token_count": 7720 }
318
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # How 🤗 Transformers solve tasks [🤗 Transformersでできること](task_summary)で、自然言語処理(NLP)、音声とオーディオ、コンピュータビジョンのタスク、それらの重要なアプリケーションについて学びました。このページでは、モデルがこれらのタスクをどのように解決するかを詳しく見て、モデルの内部で何が起こっているかを説明します。特定のタスクを解決するためには多くの方法があり、一部のモデルは特定のテクニックを実装するか、または新しい観点からタスクに取り組むかもしれませんが、Transformerモデルにとって、一般的なアイデアは同じです。柔軟なアーキテクチャのおかげで、ほとんどのモデルはエンコーダ、デコーダ、またはエンコーダ-デコーダ構造の変種です。Transformerモデル以外にも、当社のライブラリにはコンピュータビジョンタスクに今でも使用されているいくつかの畳み込みニューラルネットワーク(CNN)もあります。また、現代のCNNがどのように機能するかも説明します。 タスクがどのように解決されるかを説明するために、モデル内部で有用な予測を出力するために何が起こるかについて説明します。 - [Wav2Vec2](model_doc/wav2vec2):オーディオ分類および自動音声認識(ASR)向け - [Vision Transformer(ViT)](model_doc/vit)および[ConvNeXT](model_doc/convnext):画像分類向け - [DETR](model_doc/detr):オブジェクト検出向け - [Mask2Former](model_doc/mask2former):画像セグメンテーション向け - [GLPN](model_doc/glpn):深度推定向け - [BERT](model_doc/bert):エンコーダを使用するテキスト分類、トークン分類、および質問応答などのNLPタスク向け - [GPT2](model_doc/gpt2):デコーダを使用するテキスト生成などのNLPタスク向け - [BART](model_doc/bart):エンコーダ-デコーダを使用する要約および翻訳などのNLPタスク向け <Tip> さらに進む前に、元のTransformerアーキテクチャの基本的な知識を持つと良いです。エンコーダ、デコーダ、および注意力がどのように動作するかを知っておくと、異なるTransformerモデルがどのように動作するかを理解するのに役立ちます。始めているか、リフレッシュが必要な場合は、詳細な情報については当社の[コース](https://huggingface.co/course/chapter1/4?fw=pt)をチェックしてください! 
</Tip> ## Speech and audio [Wav2Vec2](model_doc/wav2vec2)は、未ラベルの音声データで事前トレーニングされ、オーディオ分類および自動音声認識のラベル付きデータでファインチューンされた自己教師モデルです。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/wav2vec2_architecture.png"/> </div> このモデルには主に次の4つのコンポーネントがあります。 1. *特徴エンコーダ*:生の音声波形を受け取り、平均値をゼロに正規化し、単位分散に変換し、それを20msごとの特徴ベクトルのシーケンスに変換します。 2. 波形は自然に連続しているため、テキストのシーケンスを単語に分割できるようにできるように、特徴ベクトルは*量子化モジュール*に渡され、離散音声ユニットを学習しようとします。音声ユニットは*コードブック*(語彙と考えることができます)として知られるコードワードのコレクションから選択されます。コードブックから、連続したオーディオ入力を最もよく表すベクトルまたは音声ユニット(ターゲットラベルと考えることができます)が選択され、モデルを介して転送されます。 3. 特徴ベクトルの約半分はランダムにマスクされ、マスクされた特徴ベクトルは*コンテキストネットワーク*に供給されます。これは、相対的な位置エンベッディングも追加するTransformerエンコーダです。 4. コンテキストネットワークの事前トレーニングの目的は*コントラスティブタスク*です。モデルはマスクされた予測の真の量子化音声表現を、偽の予測のセットから予測しなければならず、モデルは最も似たコンテキストベクトルと量子化音声ユニット(ターゲットラベル)を見つけるように促されます。 今、Wav2Vec2は事前トレーニングされているので、オーディオ分類または自動音声認識のためにデータをファインチューンできます! ### Audio classification 事前トレーニングされたモデルをオーディオ分類に使用するには、基本的なWav2Vec2モデルの上にシーケンス分類ヘッドを追加します。分類ヘッドはエンコーダの隠れた状態を受け入れる線形層で、各オーディオフレームから学習された特徴を表します。これらの隠れた状態は長さが異なる可能性があるため、最初に隠れた状態がプールされ、次にクラスラベルに対するロジットに変換されます。ロジットとターゲット間のクロスエントロピー損失が計算され、最も可能性の高いクラスを見つけるために使用されます。 オーディオ分類を試す準備はできましたか?Wav2Vec2をファインチューンして推論に使用する方法を学ぶための完全な[オーディオ分類ガイド](tasks/audio_classification)をチェックしてください! ### Automatic speech recognition 事前トレーニングされたモデルを自動音声認識に使用するには、[connectionist temporal classification(CTC)](glossary#connectionist-temporal-classification-ctc)のための基本的なWav2Vec2モデルの上に言語モデリングヘッドを追加します。言語モデリングヘッドはエンコーダの隠れた状態を受け入れ、それらをロジットに変換します。各ロジットはトークンクラスを表し(トークン数はタスクの語彙から来ます)、ロジットとターゲット間のCTC損失が計算され、次に転写に変換されます。 自動音声認識を試す準備はできましたか?Wav2Vec2をファインチューンして推論に使用する方法を学ぶための完全な[自動音声認識ガイド](tasks/asr)をチェックしてください! ## Computer vision コンピュータビジョンのタスクをアプローチする方法は2つあります。 1. 画像をパッチのシーケンスに分割し、Transformerを使用して並列に処理します。 2. 
[ConvNeXT](model_doc/convnext)などのモダンなCNNを使用します。これらは畳み込み層を使用しますが、モダンなネットワーク設計を採用しています。 <Tip> サードアプローチでは、Transformerと畳み込みを組み合わせたものもあります(例:[Convolutional Vision Transformer](model_doc/cvt)または[LeViT](model_doc/levit))。これらについては議論しませんが、これらはここで調べる2つのアプローチを組み合わせています。 </Tip> ViTとConvNeXTは画像分類によく使用されますが、オブジェクト検出、セグメンテーション、深度推定などの他のビジョンタスクに対しては、DETR、Mask2Former、GLPNなどが適しています。 ### Image classification ViTとConvNeXTの両方を画像分類に使用できます。主な違いは、ViTが注意メカニズムを使用し、ConvNeXTが畳み込みを使用することです。 #### Transformer [ViT](model_doc/vit)は畳み込みを完全にTransformerアーキテクチャで置き換えます。元のTransformerに精通している場合、ViTの理解は既にほとんど完了しています。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vit_architecture.jpg"/> </div> ViTが導入した主な変更点は、画像をTransformerに供給する方法です。 1. 画像は正方形で重ならないパッチのシーケンスに分割され、各パッチはベクトルまたは*パッチ埋め込み*に変換されます。パッチ埋め込みは、適切な入力次元を作成するために2D畳み込み層から生成されます(基本のTransformerの場合、各パッチ埋め込みに768の値があります)。224x224ピクセルの画像がある場合、それを16x16の画像パッチに分割できます。テキストが単語にトークン化されるように、画像はパッチのシーケンスに「トークン化」されます。 2. *学習埋め込み*、つまり特別な `[CLS]` トークンが、BERTのようにパッチ埋め込みの先頭に追加されます。 `[CLS]` トークンの最終的な隠れた状態は、付属の分類ヘッドの入力として使用されます。他の出力は無視されます。このトークンは、モデルが画像の表現をエンコードする方法を学ぶのに役立ちます。 3. パッチと学習埋め込みに追加する最後の要素は*位置埋め込み*です。モデルは画像パッチがどのように並べられているかを知りませんので、位置埋め込みも学習可能で、パッチ埋め込みと同じサイズを持ちます。最後に、すべての埋め込みがTransformerエンコーダに渡されます。 4. 出力、具体的には `[CLS]` トークンの出力だけが、多層パーセプトロンヘッド(MLP)に渡されます。ViTの事前トレーニングの目的は単純に分類です。他の分類ヘッドと同様に、MLPヘッドは出力をクラスラベルに対するロジットに変換し、クロスエントロピー損失を計算して最も可能性の高いクラスを見つけます。 画像分類を試す準備はできましたか?ViTをファインチューンして推論に使用する方法を学ぶための完全な[画像分類ガイド](tasks/image_classification)をチェックしてください! #### CNN <Tip> このセクションでは畳み込みについて簡単に説明していますが、画像の形状とサイズがどのように変化するかを事前に理解していると役立ちます。畳み込みに慣れていない場合は、fastaiの書籍から[Convolution Neural Networks chapter](https://github.com/fastai/fastbook/blob/master/13_convolutions.ipynb)をチェックしてみてください! 
</Tip> [ConvNeXT](model_doc/convnext)は、性能を向上させるために新しいモダンなネットワーク設計を採用したCNNアーキテクチャです。ただし、畳み込みはモデルの中核にまだあります。高レベルから見た場合、[畳み込み(convolution)](glossary#convolution)は、小さな行列(*カーネル*)が画像のピクセルの小さなウィンドウに乗算される操作です。それは特定のテクスチャや線の曲率などの特徴を計算します。その後、次のピクセルのウィンドウに移動します。畳み込みが移動する距離は*ストライド*として知られています。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convolution.gif"/> </div> <small>[Convolution Arithmetic for Deep Learning](https://arxiv.org/abs/1603.07285) からの基本的なパディングやストライドのない畳み込み。</small> この出力を別の畳み込み層に供給し、各連続した層ごとに、ネットワークはホットドッグやロケットのようなより複雑で抽象的なものを学習します。畳み込み層の間には、特徴の次元を削減し、特徴の位置の変動に対してモデルをより堅牢にするためにプーリング層を追加するのが一般的です。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.png"/> </div> ConvNeXTは、以下の5つの方法でCNNをモダン化しています。 1. 各ステージのブロック数を変更し、画像をより大きなストライドと対応するカーネルサイズで*パッチ化*します。重ならないスライディングウィンドウは、これにより画像をパッチに分割するViTの戦略と似ています。 2. *ボトルネック* レイヤーはチャネル数を縮小し、それを復元します。1x1の畳み込みを実行するのは速く、深さを増やすことができます。逆ボトルネックは逆のことを行い、チャネル数を拡張し、それを縮小します。これはメモリ効率が高いです。 3. ボトルネックレイヤー内の通常の3x3の畳み込み層を、*深度方向の畳み込み*で置き換えます。これは各入力チャネルに個別に畳み込みを適用し、最後にそれらを積み重ねる畳み込みです。これにより、性能向上のためにネットワーク幅が広がります。 4. ViTはグローバル受容野を持っているため、その注意メカニズムのおかげで一度に画像の多くを見ることができます。ConvNeXTはこの効果を再現しようとし、カーネルサイズを7x7に増やします。 5. ConvNeXTはまた、Transformerモデルを模倣するいくつかのレイヤーデザイン変更を行っています。アクティベーションと正規化レイヤーが少なく、活性化関数はReLUの代わりにGELUに切り替え、BatchNormの代わりにLayerNormを使用しています。 畳み込みブロックからの出力は、分類ヘッドに渡され、出力をロジットに変換し、最も可能性の高いラベルを見つけるためにクロスエントロピー損失が計算されます。 ### Object detection [DETR](model_doc/detr)、*DEtection TRansformer*、はCNNとTransformerエンコーダーデコーダーを組み合わせたエンドツーエンドのオブジェクト検出モデルです。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/detr_architecture.png"/> </div> 1. 
事前トレーニングされたCNN *バックボーン* は、ピクセル値で表される画像を受け取り、それの低解像度の特徴マップを作成します。特徴マップには次元削減のために1x1の畳み込みが適用され、高レベルの画像表現を持つ新しい特徴マップが作成されます。Transformerは連続モデルであるため、特徴マップは特徴ベクトルのシーケンスに平坦化され、位置エンベディングと組み合わせられます。 2. 特徴ベクトルはエンコーダーに渡され、その注意レイヤーを使用して画像表現を学習します。次に、エンコーダーの隠れ状態はデコーダーの*オブジェクトクエリ*と組み合わされます。オブジェクトクエリは、画像の異なる領域に焦点を当てる学習埋め込みで、各注意レイヤーを進行するにつれて更新されます。デコーダーの隠れ状態は、各オブジェクトクエリに対してバウンディングボックスの座標とクラスラベルを予測するフィードフォワードネットワークに渡されます。または、存在しない場合は `no object` が渡されます。 DETRは各オブジェクトクエリを並行してデコードして、*N*の最終的な予測(*N*はクエリの数)を出力します。典型的な自己回帰モデルが1つの要素を1回ずつ予測するのとは異なり、オブジェクト検出はセット予測タスク(`バウンディングボックス`、`クラスラベル`)であり、1回のパスで*N*の予測を行います。 3. 訓練中、DETRは*二部マッチング損失*を使用して、固定された数の予測と固定された一連の正解ラベルを比較します。 *N*のラベルセットに正解ラベルが少ない場合、 `no object` クラスでパディングされます。この損失関数は、DETRに予測と正解ラベルとの間で1対1の割り当てを見つけるように促します。バウンディングボックスまたはクラスラベルのどちらかが正しくない場合、損失が発生します。同様に、DETRが存在しないオブジェクトを予測した場合、罰金が科せられます。これにより、DETRは1つの非常に顕著なオブジェクトに焦点を当てるのではなく、画像内の他のオブジェクトを見つけるように促されます。 DETRの上にオブジェクト検出ヘッドを追加して、クラスラベルとバウンディングボックスの座標を見つけます。オブジェクト検出ヘッドには2つのコンポーネントがあります:デコーダーの隠れ状態をクラスラベルのロジットに変換するための線形層、およびバウンディングボックスを予測するためのMLPです。 オブジェクト検出を試す準備はできましたか?DETROの完全な[オブジェクト検出ガイド](tasks/object_detection)をチェックして、DETROのファインチューニング方法と推論方法を学んでください! ### Image segmentation [Mask2Former](model_doc/mask2former)は、すべての種類の画像セグメンテーションタスクを解決するためのユニバーサルアーキテクチャです。従来のセグメンテーションモデルは通常、インスタンス、セマンティック、またはパノプティックセグメンテーションの特定のサブタスクに合わせて設計されています。Mask2Formerは、それらのタスクのそれぞれを*マスク分類*の問題として捉えます。マスク分類はピクセルを*N*のセグメントにグループ化し、与えられた画像に対して*N*のマスクとそれに対応するクラスラベルを予測します。このセクションでは、Mask2Formerの動作方法を説明し、最後にSegFormerのファインチューニングを試すことができます。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/mask2former_architecture.png"/> </div> Mask2Formerの主要なコンポーネントは次の3つです。 1. [Swin](model_doc/swin)バックボーンは画像を受け入れ、3つの連続する3x3の畳み込みから低解像度の画像特徴マップを作成します。 2. 特徴マップは*ピクセルデコーダー*に渡され、低解像度の特徴を高解像度のピクセル埋め込みに徐々にアップサンプリングします。ピクセルデコーダーは実際には解像度1/32、1/16、および1/8のオリジナル画像のマルチスケール特徴(低解像度と高解像度の特徴を含む)を生成します。 3. 
これらの異なるスケールの特徴マップのそれぞれは、高解像度の特徴から小さいオブジェクトをキャプチャするために1回ずつトランスフォーマーデコーダーレイヤーに渡されます。Mask2Formerの要点は、デコーダーの*マスクアテンション*メカニズムです。クロスアテンションが画像全体に注意を向けることができるのに対し、マスクアテンションは画像の特定の領域にのみ焦点を当てます。これは速く、ローカルな画像特徴だけでもモデルが学習できるため、パフォーマンスが向上します。 4. [DETR](tasks_explained#object-detection)と同様に、Mask2Formerも学習されたオブジェクトクエリを使用し、画像の特徴と組み合わせてセットの予測(`クラスラベル`、`マスク予測`)を行います。デコーダーの隠れ状態は線形層に渡され、クラスラベルに対するロジットに変換されます。ロジットと正解ラベル間のクロスエントロピー損失が最も可能性の高いものを見つけます。 マスク予測は、ピクセル埋め込みと最終的なデコーダーの隠れ状態を組み合わせて生成されます。シグモイドクロスエントロピーやダイス損失がロジットと正解マスクの間で最も可能性の高いマスクを見つけます。 セグメンテーションタスクに取り組む準備ができましたか?SegFormerのファインチューニング方法と推論方法を学ぶために、完全な[画像セグメンテーションガイド](tasks/semantic_segmentation)をチェックしてみてください! ### Depth estimation [GLPN](model_doc/glpn)、*Global-Local Path Network*、はセグメンテーションまたは深度推定などの密な予測タスクに適しています。[SegFormer](model_doc/segformer)エンコーダーを軽量デコーダーと組み合わせたTransformerベースの深度推定モデルです。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/glpn_architecture.jpg"/> </div> 1. ViTのように、画像はパッチのシーケンスに分割されますが、これらの画像パッチは小さいです。これはセグメンテーションや深度推定などの密な予測タスクに適しています。画像パッチはパッチ埋め込みに変換されます(パッチ埋め込みの作成方法の詳細については、[画像分類](#image-classification)セクションを参照してください)。これらのパッチ埋め込みはエンコーダーに渡されます。 2. エンコーダーはパッチ埋め込みを受け入れ、複数のエンコーダーブロックを通じてそれらを渡します。各ブロックにはアテンションとMix-FFNレイヤーが含まれています。後者の役割は位置情報を提供することです。各エンコーダーブロックの最後には、階層的表現を作成するための*パッチマージング*レイヤーがあります。隣接するパッチのグループごとの特徴が連結され、連結された特徴に対して線形層が適用され、パッチの数を1/4の解像度に削減します。これが次のエンコーダーブロックへの入力となり、ここではこのプロセス全体が繰り返され、元の画像の1/8、1/16、および1/32の解像度の画像特徴が得られます。 3. 軽量デコーダーは、エンコーダーからの最後の特徴マップ(1/32スケール)を受け取り、それを1/16スケールにアップサンプリングします。その後、特徴は各特徴に対するアテンションマップからローカルとグローバルな特徴を選択して組み合わせる*セレクティブフィーチャーフュージョン(SFF)*モジュールに渡され、1/8にアップサンプリングされます。このプロセスはデコードされた特徴が元の画像と同じサイズになるまで繰り返されます。 4. 
デコードされた特徴は、最終的な予測を行うためにセマンティックセグメンテーション、深度推定、またはその他の密な予測タスクに供給されます。セマンティックセグメンテーションの場合、特徴はクラス数に対するロジットに変換され、クロスエントロピー損失を使用して最適化されます。深度推定の場合、特徴は深度マップに変換され、平均絶対誤差(MAE)または平均二乗誤差(MSE)損失が使用されます。 ## Natural language processing Transformerは最初に機械翻訳のために設計され、それ以降、ほとんどのNLPタスクを解決するためのデフォルトのアーキテクチャとなっています。一部のタスクはTransformerのエンコーダー構造に適しており、他のタスクはデコーダーに適しています。さらに、一部のタスクではTransformerのエンコーダー-デコーダー構造を使用します。 ### Text classification [BERT](model_doc/bert)はエンコーダーのみのモデルであり、テキストの豊かな表現を学習するために両側の単語に注意を払うことで、深い双方向性を効果的に実装した最初のモデルです。 1. BERTは[WordPiece](tokenizer_summary#wordpiece)トークナイゼーションを使用してテキストのトークン埋め込みを生成します。単一の文と文のペアを区別するために、特別な `[SEP]` トークンが追加されます。 `[CLS]` トークンはすべてのテキストシーケンスの先頭に追加されます。 `[CLS]` トークンとともに最終出力は、分類タスクのための入力として使用されます。BERTはまた、トークンが文のペアの最初または2番目の文に属するかどうかを示すセグメント埋め込みを追加します。 2. BERTは、事前トレーニングで2つの目標を使用します:マスクされた言語モデリングと次の文の予測です。マスクされた言語モデリングでは、入力トークンの一部がランダムにマスクされ、モデルはこれらを予測する必要があります。これにより、モデルが全ての単語を見て「次の単語」を予測することができる双方向性の問題が解決されます。予測されたマスクトークンの最終的な隠れた状態は、ソフトマックスを使用した単語のマスクを予測するためのフィードフォワードネットワークに渡されます。 2番目の事前トレーニングオブジェクトは次の文の予測です。モデルは文Aの後に文Bが続くかどうかを予測する必要があります。半分の場合、文Bは次の文であり、残りの半分の場合、文Bはランダムな文です。予測(次の文かどうか)は、2つのクラス(`IsNext`および`NotNext`)に対するソフトマックスを持つフィードフォワードネットワークに渡されます。 3. 入力埋め込みは、最終的な隠れた状態を出力するために複数のエンコーダーレイヤーを介して渡されます。 事前訓練済みモデルをテキスト分類に使用するには、ベースのBERTモデルの上にシーケンス分類ヘッドを追加します。シーケンス分類ヘッドは最終的な隠れた状態を受け入れ、それらをロジットに変換するための線形層です。クロスエントロピー損失は、ロジットとターゲット間で最も可能性の高いラベルを見つけるために計算されます。 テキスト分類を試してみる準備はできましたか?DistilBERTを微調整し、推論に使用する方法を学ぶために、完全な[テキスト分類ガイド](tasks/sequence_classification)をチェックしてみてください! ### Token classification BERTを名前エンティティ認識(NER)などのトークン分類タスクに使用するには、ベースのBERTモデルの上にトークン分類ヘッドを追加します。トークン分類ヘッドは最終的な隠れた状態を受け入れ、それらをロジットに変換するための線形層です。クロスエントロピー損失は、ロジットと各トークン間で最も可能性の高いラベルを見つけるために計算されます。 トークン分類を試してみる準備はできましたか?DistilBERTを微調整し、推論に使用する方法を学ぶために、完全な[トークン分類ガイド](tasks/token_classification)をチェックしてみてください! 
### Question answering BERTを質問応答に使用するには、ベースのBERTモデルの上にスパン分類ヘッドを追加します。この線形層は最終的な隠れた状態を受け入れ、回答に対応するテキストの「スパン」開始と終了のロジットを計算します。クロスエントロピー損失は、ロジットとラベル位置との間で最も可能性の高いテキストスパンを見つけるために計算されます。 質問応答を試してみる準備はできましたか?DistilBERTを微調整し、推論に使用する方法を学ぶために、完全な[質問応答ガイド](tasks/question_answering)をチェックしてみてください! <Tip> 💡 注意してください。一度事前トレーニングが完了したBERTを使用してさまざまなタスクに簡単に適用できることに注目してください。必要なのは、事前トレーニング済みモデルに特定のヘッドを追加して、隠れた状態を所望の出力に変換することだけです! </Tip> ### Text generation [GPT-2](model_doc/gpt2)は大量のテキストで事前トレーニングされたデコーダー専用モデルです。プロンプトを与えると説得力のあるテキストを生成し、明示的にトレーニングされていないにもかかわらず、質問応答などの他のNLPタスクも完了できます。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gpt2_architecture.png"/> </div> 1. GPT-2は[バイトペアエンコーディング(BPE)](tokenizer_summary#bytepair-encoding-bpe)を使用して単語をトークナイズし、トークン埋め込みを生成します。位置エンコーディングがトークン埋め込みに追加され、各トークンの位置を示します。入力埋め込みは複数のデコーダーブロックを介して最終的な隠れた状態を出力するために渡されます。各デコーダーブロック内で、GPT-2は「マスクされた自己注意」レイヤーを使用します。これは、GPT-2が未来のトークンに注意を払うことはできないことを意味します。GPT-2は左側のトークンにのみ注意を払うことが許可されています。これはBERTの[`mask`]トークンとは異なり、マスクされた自己注意では未来のトークンに対してスコアを`0`に設定するための注意マスクが使用されます。 2. デコーダーからの出力は、言語モデリングヘッドに渡され、最終的な隠れた状態をロジットに変換するための線形変換を実行します。ラベルはシーケンス内の次のトークンであり、これはロジットを右に1つずらして生成されます。クロスエントロピー損失は、シフトされたロジットとラベル間で計算され、次に最も可能性の高いトークンを出力します。 GPT-2の事前トレーニングの目標は完全に[因果言語モデリング](glossary#causal-language-modeling)に基づいており、シーケンス内の次の単語を予測します。これにより、GPT-2はテキスト生成を含むタスクで特に優れた性能を発揮します。 テキスト生成を試してみる準備はできましたか?DistilGPT-2を微調整し、推論に使用する方法を学ぶために、完全な[因果言語モデリングガイド](tasks/language_modeling#causal-language-modeling)をチェックしてみてください! <Tip> テキスト生成に関する詳細は、[テキスト生成戦略](generation_strategies)ガイドをチェックしてみてください! </Tip> ### Summarization [BART](model_doc/bart) や [T5](model_doc/t5) のようなエンコーダーデコーダーモデルは、要約タスクのシーケンス・トゥ・シーケンス・パターンに設計されています。このセクションでは、BARTの動作方法を説明し、最後にT5の微調整を試すことができます。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bart_architecture.png"/> </div> 1. 
BARTのエンコーダーアーキテクチャは、BERTと非常に似ており、テキストのトークンと位置エンベディングを受け入れます。BARTは、入力を破壊してからデコーダーで再構築することによって事前トレーニングされます。特定の破壊戦略を持つ他のエンコーダーとは異なり、BARTは任意の種類の破壊を適用できます。ただし、*テキストインフィリング*破壊戦略が最適です。テキストインフィリングでは、いくつかのテキストスパンが**単一の** [`mask`] トークンで置き換えられます。これは重要です、なぜならモデルはマスクされたトークンを予測しなければならず、モデルに欠落トークンの数を予測させるからです。入力埋め込みとマスクされたスパンはエンコーダーを介して最終的な隠れた状態を出力しますが、BERTとは異なり、BARTは単語を予測するための最終的なフィードフォワードネットワークを最後に追加しません。 2. エンコーダーの出力はデコーダーに渡され、デコーダーはエンコーダーの出力からマスクされたトークンと非破壊トークンを予測する必要があります。これにより、デコーダーは元のテキストを復元するのに役立つ追加のコンテキストが提供されます。デコーダーからの出力は言語モデリングヘッドに渡され、隠れた状態をロジットに変換するための線形変換を実行します。クロスエントロピー損失は、ロジットとラベルの間で計算され、ラベルは単に右にシフトされたトークンです。 要約を試す準備はできましたか?T5を微調整して推論に使用する方法を学ぶために、完全な[要約ガイド](tasks/summarization)をご覧ください! <Tip> テキスト生成に関する詳細は、[テキスト生成戦略](generation_strategies)ガイドをチェックしてみてください! </Tip> ### Translation 翻訳は、もう一つのシーケンス・トゥ・シーケンス・タスクの例であり、[BART](model_doc/bart) や [T5](model_doc/t5) のようなエンコーダーデコーダーモデルを使用して実行できます。このセクションでは、BARTの動作方法を説明し、最後にT5の微調整を試すことができます。 BARTは、ソース言語をターゲット言語にデコードできるようにするために、別個にランダムに初期化されたエンコーダーを追加することで翻訳に適応します。この新しいエンコーダーの埋め込みは、元の単語埋め込みの代わりに事前トレーニング済みのエンコーダーに渡されます。ソースエンコーダーは、モデルの出力からのクロスエントロピー損失を用いてソースエンコーダー、位置エンベディング、および入力エンベディングを更新することによって訓練されます。この最初のステップではモデルパラメータが固定され、すべてのモデルパラメータが2番目のステップで一緒に訓練されます。 その後、翻訳のために多言語版のmBARTが登場し、多言語で事前トレーニングされたモデルとして利用可能です。 翻訳を試す準備はできましたか?T5を微調整して推論に使用する方法を学ぶために、完全な[翻訳ガイド](tasks/summarization)をご覧ください! <Tip> テキスト生成に関する詳細は、[テキスト生成戦略](generation_strategies)ガイドをチェックしてみてください! </Tip>
transformers/docs/source/ja/tasks_explained.md/0
{ "file_path": "transformers/docs/source/ja/tasks_explained.md", "repo_id": "transformers", "token_count": 16553 }
319
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BERTology BERT와 같은 대규모 트랜스포머의 내부 동작을 조사하는 연구 분야가 점점 더 중요해지고 있습니다. 혹자는 "BERTology"라 칭하기도 합니다. 이 분야의 좋은 예시는 다음과 같습니다: - BERT는 고전적인 NLP 파이프라인의 재발견 - Ian Tenney, Dipanjan Das, Ellie Pavlick: https://arxiv.org/abs/1905.05950 - 16개의 헤드가 정말로 1개보다 나은가? - Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650 - BERT는 무엇을 보는가? BERT의 어텐션 분석 - Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: https://arxiv.org/abs/1906.04341 - CAT-probing: 프로그래밍 언어에 대해 사전훈련된 모델이 어떻게 코드 구조를 보는지 알아보기 위한 메트릭 기반 접근 방법: https://arxiv.org/abs/2210.04633 우리는 이 새로운 연구 분야의 발전을 돕기 위해, BERT/GPT/GPT-2 모델에 내부 표현을 살펴볼 수 있는 몇 가지 기능을 추가했습니다. 이 기능들은 주로 Paul Michel의 훌륭한 작업을 참고하여 개발되었습니다 (https://arxiv.org/abs/1905.10650): - BERT/GPT/GPT-2의 모든 은닉 상태에 접근하기, - BERT/GPT/GPT-2의 각 헤드의 모든 어텐션 가중치에 접근하기, - 헤드의 출력 값과 그래디언트를 검색하여 헤드 중요도 점수를 계산하고 https://arxiv.org/abs/1905.10650에서 설명된 대로 헤드를 제거하는 기능을 제공합니다. 이러한 기능들을 이해하고 직접 사용해볼 수 있도록 [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) 예제 스크립트를 추가했습니다. 이 예제 스크립트에서는 GLUE에 대해 사전훈련된 모델에서 정보를 추출하고 모델을 가지치기(prune)해봅니다.
transformers/docs/source/ko/bertology.md/0
{ "file_path": "transformers/docs/source/ko/bertology.md", "repo_id": "transformers", "token_count": 1557 }
320
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 설치방법[[installation]] 🤗 Transformers를 사용 중인 딥러닝 라이브러리에 맞춰 설치하고, 캐시를 구성하거나 선택적으로 오프라인에서도 실행할 수 있도록 🤗 Transformers를 설정하는 방법을 배우겠습니다. 🤗 Transformers는 Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+ 및 Flax에서 테스트되었습니다. 딥러닝 라이브러리를 설치하려면 아래 링크된 저마다의 공식 사이트를 참고해주세요. * [PyTorch](https://pytorch.org/get-started/locally/) 설치하기 * [TensorFlow 2.0](https://www.tensorflow.org/install/pip) 설치하기 * [Flax](https://flax.readthedocs.io/en/latest/) 설치하기 ## pip으로 설치하기[[install-with-pip]] 🤗 Transformers를 [가상 환경](https://docs.python.org/3/library/venv.html)에 설치하는 것을 추천드립니다. Python 가상 환경에 익숙하지 않다면, 이 [가이드](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)를 참고하세요. 가상 환경을 사용하면 서로 다른 프로젝트들을 보다 쉽게 관리할 수 있고, 의존성 간의 호환성 문제를 방지할 수 있습니다. 먼저 프로젝트 디렉토리에서 가상 환경을 만들어 줍니다. ```bash python -m venv .env ``` 가상 환경을 활성화해주세요. Linux나 MacOS의 경우: ```bash source .env/bin/activate ``` Windows의 경우: ```bash .env/Scripts/activate ``` 이제 🤗 Transformers를 설치할 준비가 되었습니다. 다음 명령을 입력해주세요. ```bash pip install transformers ``` CPU만 써도 된다면, 🤗 Transformers와 딥러닝 라이브러리를 단 1줄로 설치할 수 있습니다. 
예를 들어 🤗 Transformers와 PyTorch의 경우: ```bash pip install transformers[torch] ``` 🤗 Transformers와 TensorFlow 2.0의 경우: ```bash pip install transformers[tf-cpu] ``` 🤗 Transformers와 Flax의 경우: ```bash pip install transformers[flax] ``` 마지막으로 🤗 Transformers가 제대로 설치되었는지 확인할 차례입니다. 사전훈련된 모델을 다운로드하는 코드입니다. ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` 라벨과 점수가 출력되면 잘 설치된 것입니다. ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## 소스에서 설치하기[[install-from-source]] 🤗 Transformers를 소스에서 설치하려면 아래 명령을 실행하세요. ```bash pip install git+https://github.com/huggingface/transformers ``` 위 명령은 최신이지만 (안정적인) `stable` 버전이 아닌 실험성이 짙은 `main` 버전을 설치합니다. `main` 버전은 개발 현황과 발맞추는데 유용합니다. 예시로 마지막 공식 릴리스 이후 발견된 버그가 패치되었지만, 새 릴리스로 아직 롤아웃되지는 않은 경우를 들 수 있습니다. 바꿔 말하면 `main` 버전이 안정성과는 거리가 있다는 뜻이기도 합니다. 저희는 `main` 버전을 사용하는데 문제가 없도록 노력하고 있으며, 대부분의 문제는 대개 몇 시간이나 하루 안에 해결됩니다. 만약 문제가 발생하면 [이슈](https://github.com/huggingface/transformers/issues)를 열어주시면 더 빨리 해결할 수 있습니다! 전과 마찬가지로 🤗 Transformers가 제대로 설치되었는지 확인할 차례입니다. ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## 수정 가능한 설치[[editable-install]] 수정 가능한 설치가 필요한 경우는 다음과 같습니다. * `main` 버전의 소스 코드를 사용하기 위해 * 🤗 Transformers에 기여하고 싶어서 코드의 변경 사항을 테스트하기 위해 리포지터리를 복제하고 🤗 Transformers를 설치하려면 다음 명령을 입력해주세요. ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` 위 명령은 리포지터리를 복제한 위치의 폴더와 Python 라이브러리의 경로를 연결시킵니다. Python이 일반 라이브러리 경로 외에 복제한 폴더 내부를 확인할 것입니다. 예를 들어 Python 패키지가 일반적으로 `~/anaconda3/envs/main/lib/python3.7/site-packages/`에 설치되어 있는데, 명령을 받은 Python이 이제 복제한 폴더인 `~/transformers/`도 검색하게 됩니다. <Tip warning={true}> 라이브러리를 계속 사용하려면 `transformers` 폴더를 꼭 유지해야 합니다. </Tip> 복제본은 최신 버전의 🤗 Transformers로 쉽게 업데이트할 수 있습니다. ```bash cd ~/transformers/ git pull ``` Python 환경을 다시 실행하면 업데이트된 🤗 Transformers의 `main` 버전을 찾아낼 것입니다. ## conda로 설치하기[[install-with-conda]] `conda-forge` conda 채널에서 설치할 수 있습니다. 
```bash conda install conda-forge::transformers ``` ## 캐시 구성하기[[cache-setup]] 사전훈련된 모델은 다운로드된 후 로컬 경로 `~/.cache/huggingface/hub`에 캐시됩니다. 셸 환경 변수 `TRANSFORMERS_CACHE`의 기본 디렉터리입니다. Windows의 경우 기본 디렉터리는 `C:\Users\username\.cache\huggingface\hub`입니다. 아래의 셸 환경 변수를 (우선 순위) 순서대로 변경하여 다른 캐시 디렉토리를 지정할 수 있습니다. 1. 셸 환경 변수 (기본): `HUGGINGFACE_HUB_CACHE` 또는 `TRANSFORMERS_CACHE` 2. 셸 환경 변수: `HF_HOME` 3. 셸 환경 변수: `XDG_CACHE_HOME` + `/huggingface` <Tip> 과거 🤗 Transformers에서 쓰였던 셸 환경 변수 `PYTORCH_TRANSFORMERS_CACHE` 또는 `PYTORCH_PRETRAINED_BERT_CACHE`이 설정되있다면, 셸 환경 변수 `TRANSFORMERS_CACHE`을 지정하지 않는 한 우선 사용됩니다. </Tip> ## 오프라인 모드[[offline-mode]] 🤗 Transformers를 로컬 파일만 사용하도록 해서 방화벽 또는 오프라인 환경에서 실행할 수 있습니다. 활성화하려면 `HF_HUB_OFFLINE=1` 환경 변수를 설정하세요. <Tip> `HF_DATASETS_OFFLINE=1` 환경 변수를 설정하여 오프라인 훈련 과정에 [🤗 Datasets](https://huggingface.co/docs/datasets/)을 추가할 수 있습니다. </Tip> 예를 들어 외부 기기 사이에 방화벽을 둔 일반 네트워크에서 평소처럼 프로그램을 다음과 같이 실행할 수 있습니다. ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` 오프라인 기기에서 동일한 프로그램을 다음과 같이 실행할 수 있습니다. ```bash HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` 이제 스크립트는 로컬 파일에 한해서만 검색할 것이므로, 스크립트가 중단되거나 시간이 초과될 때까지 멈춰있지 않고 잘 실행될 것입니다. ### 오프라인용 모델 및 토크나이저 만들어두기[[fetch-models-and-tokenizers-to-use-offline]] Another option for using 🤗 Transformers offline is to download the files ahead of time, and then point to their local path when you need to use them offline. There are three ways to do this: 🤗 Transformers를 오프라인으로 사용하는 또 다른 방법은 파일을 미리 다운로드한 다음, 오프라인일 때 사용할 로컬 경로를 지정해두는 것입니다. 3가지 중 편한 방법을 고르세요. * [Model Hub](https://huggingface.co/models)의 UI를 통해 파일을 다운로드하려면 ↓ 아이콘을 클릭하세요. 
![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * [`PreTrainedModel.from_pretrained`]와 [`PreTrainedModel.save_pretrained`] 워크플로를 활용하세요. 1. 미리 [`PreTrainedModel.from_pretrained`]로 파일을 다운로드해두세요. ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. [`PreTrainedModel.save_pretrained`]로 지정된 경로에 파일을 저장해두세요. ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. 이제 오프라인일 때 [`PreTrainedModel.from_pretrained`]로 저장해뒀던 파일을 지정된 경로에서 다시 불러오세요. ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) 라이브러리를 활용해서 파일을 다운로드하세요. 1. 가상환경에 `huggingface_hub` 라이브러리를 설치하세요. ```bash python -m pip install huggingface_hub ``` 2. [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) 함수로 파일을 특정 위치에 다운로드할 수 있습니다. 예를 들어 아래 명령은 [T0](https://huggingface.co/bigscience/T0_3B) 모델의 `config.json` 파일을 지정된 경로에 다운로드합니다. ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` 파일을 다운로드하고 로컬에 캐시 해놓고 나면, 나중에 불러와 사용할 수 있도록 로컬 경로를 지정해두세요. ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> Hub에 저장된 파일을 다운로드하는 방법을 더 자세히 알아보려면 [Hub에서 파일 다운로드하기](https://huggingface.co/docs/hub/how-to-downstream) 섹션을 참고해주세요. </Tip>
transformers/docs/source/ko/installation.md/0
{ "file_path": "transformers/docs/source/ko/installation.md", "repo_id": "transformers", "token_count": 6897 }
321
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 단일 GPU에서 효율적인 추론 [[efficient-inference-on-a-single-gpu]] 이 가이드 외에도, [단일 GPU에서의 훈련 가이드](perf_train_gpu_one)와 [CPU에서의 추론 가이드](perf_infer_cpu)에서도 관련 정보를 찾을 수 있습니다. ## Better Transformer: PyTorch 네이티브 Transformer 패스트패스 [[better-transformer-pytorchnative-transformer-fastpath]] PyTorch 네이티브 [`nn.MultiHeadAttention`](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/) 어텐션 패스트패스인 BetterTransformer는 [🤗 Optimum 라이브러리](https://huggingface.co/docs/optimum/bettertransformer/overview)의 통합을 통해 Transformers와 함께 사용할 수 있습니다. PyTorch의 어텐션 패스트패스는 커널 퓨전과 [중첩된 텐서](https://pytorch.org/docs/stable/nested.html)의 사용을 통해 추론 속도를 높일 수 있습니다. 자세한 벤치마크는 [이 블로그 글](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2)에서 확인할 수 있습니다. [`optimum`](https://github.com/huggingface/optimum) 패키지를 설치한 후에는 추론 중 Better Transformer를 사용할 수 있도록 [`~PreTrainedModel.to_bettertransformer`]를 호출하여 관련 내부 모듈을 대체합니다: ```python model = model.to_bettertransformer() ``` [`~PreTrainedModel.reverse_bettertransformer`] 메소드는 정규화된 transformers 모델링을 사용하기 위해 모델을 저장하기 전 원래의 모델링으로 돌아갈 수 있도록 해줍니다: ```python model = model.reverse_bettertransformer() model.save_pretrained("saved_model") ``` PyTorch 2.0부터는 어텐션 패스트패스가 인코더와 디코더 모두에서 지원됩니다. 
지원되는 아키텍처 목록은 [여기](https://huggingface.co/docs/optimum/bettertransformer/overview#supported-models)에서 확인할 수 있습니다. ## FP4 혼합 정밀도 추론을 위한 `bitsandbytes` 통합 [[bitsandbytes-integration-for-fp4-mixedprecision-inference]] `bitsandbytes`를 설치하면 GPU에서 손쉽게 모델을 압축할 수 있습니다. FP4 양자화를 사용하면 원래의 전체 정밀도 버전과 비교하여 모델 크기를 최대 8배 줄일 수 있습니다. 아래에서 시작하는 방법을 확인하세요. <Tip> 이 기능은 다중 GPU 설정에서도 사용할 수 있습니다. </Tip> ### 요구 사항 [[requirements-for-fp4-mixedprecision-inference]] - 최신 `bitsandbytes` 라이브러리 `pip install bitsandbytes>=0.39.0` - 최신 `accelerate`를 소스에서 설치 `pip install git+https://github.com/huggingface/accelerate.git` - 최신 `transformers`를 소스에서 설치 `pip install git+https://github.com/huggingface/transformers.git` ### FP4 모델 실행 - 단일 GPU 설정 - 빠른 시작 [[running-fp4-models-single-gpu-setup-quickstart]] 다음 코드를 실행하여 단일 GPU에서 빠르게 FP4 모델을 실행할 수 있습니다. ```py from transformers import AutoModelForCausalLM model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` `device_map`은 선택 사항입니다. 그러나 `device_map = 'auto'`로 설정하는 것이 사용 가능한 리소스를 효율적으로 디스패치하기 때문에 추론에 있어 권장됩니다. ### FP4 모델 실행 - 다중 GPU 설정 [[running-fp4-models-multi-gpu-setup]] 다중 GPU에서 혼합 4비트 모델을 가져오는 방법은 단일 GPU 설정과 동일합니다(동일한 명령어 사용): ```py model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` 하지만 `accelerate`를 사용하여 각 GPU에 할당할 GPU RAM을 제어할 수 있습니다. 다음과 같이 `max_memory` 인수를 사용하세요: ```py max_memory_mapping = {0: "600MB", 1: "1GB"} model_name = "bigscience/bloom-3b" model_4bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_4bit=True, max_memory=max_memory_mapping ) ``` 이 예에서는 첫 번째 GPU가 600MB의 메모리를 사용하고 두 번째 GPU가 1GB를 사용합니다. ### 고급 사용법 [[advanced-usage]] 이 방법의 더 고급 사용법에 대해서는 [양자화](main_classes/quantization) 문서 페이지를 참조하세요. 
## Int8 혼합 정밀도 행렬 분해를 위한 `bitsandbytes` 통합 [[bitsandbytes-integration-for-int8-mixedprecision-matrix-decomposition]] <Tip> 이 기능은 다중 GPU 설정에서도 사용할 수 있습니다. </Tip> [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339) 논문에서 우리는 몇 줄의 코드로 Hub의 모든 모델에 대한 Hugging Face 통합을 지원합니다. 이 방법은 `float16` 및 `bfloat16` 가중치에 대해 `nn.Linear` 크기를 2배로 줄이고, `float32` 가중치에 대해 4배로 줄입니다. 이는 절반 정밀도에서 이상치를 처리함으로써 품질에 거의 영향을 미치지 않습니다. ![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) Int8 혼합 정밀도 행렬 분해는 행렬 곱셈을 두 개의 스트림으로 분리합니다: (1) fp16로 곱해지는 체계적인 특이값 이상치 스트림 행렬(0.01%) 및 (2) int8 행렬 곱셈의 일반적인 스트림(99.9%). 이 방법을 사용하면 매우 큰 모델에 대해 예측 저하 없이 int8 추론이 가능합니다. 이 방법에 대한 자세한 내용은 [논문](https://arxiv.org/abs/2208.07339)이나 [통합에 관한 블로그 글](https://huggingface.co/blog/hf-bitsandbytes-integration)에서 확인할 수 있습니다. ![MixedInt8.gif](https://cdn-uploads.huggingface.co/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) 커널은 GPU 전용으로 컴파일되어 있기 때문에 혼합 8비트 모델을 실행하려면 GPU가 필요합니다. 이 기능을 사용하기 전에 모델의 1/4(또는 모델 가중치가 절반 정밀도인 경우 절반)을 저장할 충분한 GPU 메모리가 있는지 확인하세요. 이 모듈을 사용하는 데 도움이 되는 몇 가지 참고 사항이 아래에 나와 있습니다. 또는 [Google colab](#colab-demos)에서 데모를 따라할 수도 있습니다. ### 요구 사항 [[requirements-for-int8-mixedprecision-matrix-decomposition]] - `bitsandbytes<0.37.0`을 사용하는 경우, 8비트 텐서 코어(Turing, Ampere 또는 이후 아키텍처 - 예: T4, RTX20s RTX30s, A40-A100)를 지원하는 NVIDIA GPU에서 실행하는지 확인하세요. `bitsandbytes>=0.37.0`을 사용하는 경우, 모든 GPU가 지원됩니다. 
- 올바른 버전의 `bitsandbytes`를 다음 명령으로 설치하세요: `pip install bitsandbytes>=0.31.5` - `accelerate`를 설치하세요 `pip install accelerate>=0.12.0` ### 혼합 Int8 모델 실행 - 단일 GPU 설정 [[running-mixedint8-models-single-gpu-setup]] 필요한 라이브러리를 설치한 후 혼합 8비트 모델을 가져오는 방법은 다음과 같습니다: ```py from transformers import AutoModelForCausalLM, BitsAndBytesConfig model_name = "bigscience/bloom-2b5" model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) ``` 텍스트 생성의 경우: * `pipeline()` 함수 대신 모델의 `generate()` 메소드를 사용하는 것을 권장합니다. `pipeline()` 함수로는 추론이 가능하지만, 혼합 8비트 모델에 최적화되지 않았기 때문에 `generate()` 메소드를 사용하는 것보다 느릴 수 있습니다. 또한, nucleus 샘플링과 같은 일부 샘플링 전략은 혼합 8비트 모델에 대해 `pipeline()` 함수에서 지원되지 않습니다. * 입력을 모델과 동일한 GPU에 배치하는 것이 좋습니다. 다음은 간단한 예입니다: ```py from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig model_name = "bigscience/bloom-2b5" tokenizer = AutoTokenizer.from_pretrained(model_name) model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) prompt = "Hello, my llama is cute" inputs = tokenizer(prompt, return_tensors="pt").to("cuda") generated_ids = model.generate(**inputs) outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) ``` ### 혼합 Int8 모델 실행 - 다중 GPU 설정 [[running-mixedint8-models-multi-gpu-setup]] 다중 GPU에서 혼합 8비트 모델을 로드하는 방법은 단일 GPU 설정과 동일합니다(동일한 명령어 사용): ```py model_name = "bigscience/bloom-2b5" model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) ``` 하지만 `accelerate`를 사용하여 각 GPU에 할당할 GPU RAM을 제어할 수 있습니다. 다음과 같이 `max_memory` 인수를 사용하세요: ```py max_memory_mapping = {0: "1GB", 1: "2GB"} model_name = "bigscience/bloom-3b" model_8bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping ) ``` 이 예시에서는 첫 번째 GPU가 1GB의 메모리를 사용하고 두 번째 GPU가 2GB를 사용합니다. 
### Colab 데모 [[colab-demos]] 이 방법을 사용하면 이전에 Google Colab에서 추론할 수 없었던 모델에 대해 추론할 수 있습니다. Google Colab에서 8비트 양자화를 사용하여 T5-11b(42GB in fp32)를 실행하는 데모를 확인하세요: [![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) 또는 BLOOM-3B에 대한 데모를 확인하세요: [![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing)
transformers/docs/source/ko/perf_infer_gpu_one.md/0
{ "file_path": "transformers/docs/source/ko/perf_infer_gpu_one.md", "repo_id": "transformers", "token_count": 6517 }
322
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quanto[[quanto]] <Tip> 이 [노트북](https://colab.research.google.com/drive/16CXfVmtdQvciSh9BopZUDYcmXCDpvgrT?usp=sharing)으로 Quanto와 transformers를 사용해 보세요! </Tip> [🤗 Quanto](https://github.com/huggingface/optimum-quanto) 라이브러리는 다목적 파이토치 양자화 툴킷입니다. 이 라이브러리에서 사용되는 양자화 방법은 선형 양자화입니다. Quanto는 다음과 같은 여러 가지 기능을 제공합니다: - 가중치 양자화 (`float8`,`int8`,`int4`,`int2`) - 활성화 양자화 (`float8`,`int8`) - 모달리티에 구애받지 않음 (e.g CV,LLM) - 장치에 구애받지 않음 (e.g CUDA,MPS,CPU) - `torch.compile` 호환성 - 특정 장치에 대한 사용자 정의 커널의 쉬운 추가 - QAT(양자화를 고려한 학습) 지원 <!-- Add link to the blogpost --> 시작하기 전에 다음 라이브러리가 설치되어 있는지 확인하세요: ```bash pip install quanto accelerate transformers ``` 이제 [`~PreTrainedModel.from_pretrained`] 메소드에 [`QuantoConfig`] 객체를 전달하여 모델을 양자화할 수 있습니다. 이 방식은 `torch.nn.Linear` 레이어를 포함하는 모든 모달리티의 모든 모델에서 잘 작동합니다. 허깅페이스의 transformers 라이브러리는 개발자 편의를 위해 quanto의 인터페이스를 일부 통합하여 지원하고 있으며, 이 방식으로는 가중치 양자화만 지원합니다. 활성화 양자화, 캘리브레이션, QAT 같은 더 복잡한 기능을 수행하기 위해서는 [quanto](https://github.com/huggingface/optimum-quanto) 라이브러리의 해당 함수를 직접 호출해야 합니다. 
```py from transformers import AutoModelForCausalLM, AutoTokenizer, QuantoConfig model_id = "facebook/opt-125m" tokenizer = AutoTokenizer.from_pretrained(model_id) quantization_config = QuantoConfig(weights="int8") quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0", quantization_config=quantization_config) ``` 참고로, transformers에서는 아직 직렬화가 지원되지 않지만 곧 지원될 예정입니다! 모델을 저장하고 싶으면 quanto 라이브러리를 대신 사용할 수 있습니다. Quanto 라이브러리는 양자화를 위해 선형 양자화 알고리즘을 사용합니다. 비록 기본적인 양자화 기술이지만, 좋은 결과를 얻는데 아주 큰 도움이 됩니다! 바로 아래에 있는 벤치마크(llama-2-7b의 펄플렉서티 지표)를 확인해 보세요. 더 많은 벤치마크는 [여기](https://github.com/huggingface/quanto/tree/main/bench/generation) 에서 찾을 수 있습니다. <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/NousResearch-Llama-2-7b-hf_Perplexity.png" alt="llama-2-7b-quanto-perplexity" /> </div> </div> 이 라이브러리는 대부분의 PTQ 최적화 알고리즘과 호환될 만큼 충분히 유연합니다. 앞으로의 계획은 가장 인기 있는 알고리즘(AWQ, Smoothquant)을 최대한 매끄럽게 통합하는 것입니다.
transformers/docs/source/ko/quantization/quanto.md/0
{ "file_path": "transformers/docs/source/ko/quantization/quanto.md", "repo_id": "transformers", "token_count": 2333 }
323
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 마스크 생성[[mask-generation]] 마스크 생성(Mask generation)은 이미지에 대한 의미 있는 마스크를 생성하는 작업입니다. 이 작업은 [이미지 분할](semantic_segmentation)과 매우 유사하지만, 많은 차이점이 있습니다. 이미지 분할 모델은 라벨이 달린 데이터셋으로 학습되며, 학습 중에 본 클래스들로만 제한됩니다. 이미지가 주어지면, 이미지 분할 모델은 여러 마스크와 그에 해당하는 클래스를 반환합니다. 반면, 마스크 생성 모델은 대량의 데이터로 학습되며 두 가지 모드로 작동합니다. - 프롬프트 모드(Prompting mode): 이 모드에서는 모델이 이미지와 프롬프트를 입력받습니다. 프롬프트는 이미지 내 객체의 2D 좌표(XY 좌표)나 객체를 둘러싼 바운딩 박스가 될 수 있습니다. 프롬프트 모드에서는 모델이 프롬프트가 가리키는 객체의 마스크만 반환합니다. - 전체 분할 모드(Segment Everything mode): 이 모드에서는 주어진 이미지 내에서 모든 마스크를 생성합니다. 이를 위해 그리드 형태의 점들을 생성하고 이를 이미지에 오버레이하여 추론합니다. 마스크 생성 작업은 [전체 분할 모드(Segment Anything Model, SAM)](model_doc/sam)에 의해 지원됩니다. SAM은 Vision Transformer 기반 이미지 인코더, 프롬프트 인코더, 그리고 양방향 트랜스포머 마스크 디코더로 구성된 강력한 모델입니다. 이미지와 프롬프트는 인코딩되고, 디코더는 이러한 임베딩을 받아 유효한 마스크를 생성합니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sam.png" alt="SAM Architecture"/> </div> SAM은 대규모 데이터를 다룰 수 있는 강력한 분할 기반 모델입니다. 이 모델은 100만 개의 이미지와 11억 개의 마스크를 포함하는 [SA-1B](https://ai.meta.com/datasets/segment-anything/) 데이터 세트로 학습되었습니다. 
이 가이드에서는 다음과 같은 내용을 배우게 됩니다: - 배치 처리와 함께 전체 분할 모드에서 추론하는 방법 - 포인트 프롬프팅 모드에서 추론하는 방법 - 박스 프롬프팅 모드에서 추론하는 방법 먼저, `transformers`를 설치해 봅시다: ```bash pip install -q transformers ``` ## 마스크 생성 파이프라인[[mask-generation-pipeline]] 마스크 생성 모델로 추론하는 가장 쉬운 방법은 `mask-generation` 파이프라인을 사용하는 것입니다. ```python >>> from transformers import pipeline >>> checkpoint = "facebook/sam-vit-base" >>> mask_generator = pipeline(model=checkpoint, task="mask-generation") ``` 이미지를 예시로 봅시다. ```python from PIL import Image import requests img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg" image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg" alt="Example Image"/> </div> 전체적으로 분할해봅시다. `points-per-batch`는 전체 분할 모드에서 점들의 병렬 추론을 가능하게 합니다. 이를 통해 추론 속도가 빨라지지만, 더 많은 메모리를 소모하게 됩니다. 또한, SAM은 이미지가 아닌 점들에 대해서만 배치 처리를 지원합니다. `pred_iou_thresh`는 IoU 신뢰 임계값으로, 이 임계값을 초과하는 마스크만 반환됩니다. ```python masks = mask_generator(image, points_per_batch=128, pred_iou_thresh=0.88) ``` `masks` 는 다음과 같이 생겼습니다: ```bash {'masks': [array([[False, False, False, ..., True, True, True], [False, False, False, ..., True, True, True], [False, False, False, ..., True, True, True], ..., [False, False, False, ..., False, False, False], [False, False, False, ..., False, False, False], [False, False, False, ..., False, False, False]]), array([[False, False, False, ..., False, False, False], [False, False, False, ..., False, False, False], [False, False, False, ..., False, False, False], ..., 'scores': tensor([0.9972, 0.9917, ..., } ``` 위 내용을 아래와 같이 시각화할 수 있습니다: ```python import matplotlib.pyplot as plt plt.imshow(image, cmap='gray') for i, mask in enumerate(masks["masks"]): plt.imshow(mask, cmap='viridis', alpha=0.1, vmin=0, vmax=1) plt.axis('off') plt.show() ``` 아래는 회색조 원본 이미지에 다채로운 색상의 맵을 겹쳐놓은 모습입니다. 매우 인상적인 결과입니다. 
<div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee_segmented.png" alt="Visualized"/> </div> ## 모델 추론[[model-inference]] ### 포인트 프롬프팅[[point-prompting]] 파이프라인 없이도 모델을 사용할 수 있습니다. 이를 위해 모델과 프로세서를 초기화해야 합니다. ```python from transformers import SamModel, SamProcessor import torch device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = SamModel.from_pretrained("facebook/sam-vit-base").to(device) processor = SamProcessor.from_pretrained("facebook/sam-vit-base") ``` 포인트 프롬프팅을 하기 위해, 입력 포인트를 프로세서에 전달한 다음, 프로세서 출력을 받아 모델에 전달하여 추론합니다. 모델 출력을 후처리하려면, 출력과 함께 프로세서의 초기 출력에서 가져온 `original_sizes`와 `reshaped_input_sizes`를 전달해야 합니다. 왜냐하면, 프로세서가 이미지 크기를 조정하고 출력을 추정해야 하기 때문입니다. ```python input_points = [[[2592, 1728]]] # 벌의 포인트 위치 inputs = processor(image, input_points=input_points, return_tensors="pt").to(device) with torch.no_grad(): outputs = model(**inputs) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) ``` `masks` 출력으로 세 가지 마스크를 시각화할 수 있습니다. 
```python import matplotlib.pyplot as plt import numpy as np fig, axes = plt.subplots(1, 4, figsize=(15, 5)) axes[0].imshow(image) axes[0].set_title('Original Image') mask_list = [masks[0][0][0].numpy(), masks[0][0][1].numpy(), masks[0][0][2].numpy()] for i, mask in enumerate(mask_list, start=1): overlayed_image = np.array(image).copy() overlayed_image[:,:,0] = np.where(mask == 1, 255, overlayed_image[:,:,0]) overlayed_image[:,:,1] = np.where(mask == 1, 0, overlayed_image[:,:,1]) overlayed_image[:,:,2] = np.where(mask == 1, 0, overlayed_image[:,:,2]) axes[i].imshow(overlayed_image) axes[i].set_title(f'Mask {i}') for ax in axes: ax.axis('off') plt.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/masks.png" alt="Visualized"/> </div> ### 박스 프롬프팅[[box-prompting]] 박스 프롬프팅도 포인트 프롬프팅과 유사한 방식으로 할 수 있습니다. 입력 박스를 `[x_min, y_min, x_max, y_max]` 형식의 리스트로 작성하여 이미지와 함께 `processor`에 전달할 수 있습니다. 프로세서 출력을 받아 모델에 직접 전달한 후, 다시 출력을 후처리해야 합니다. ```python # 벌 주위의 바운딩 박스 box = [2350, 1600, 2850, 2100] inputs = processor( image, input_boxes=[[[box]]], return_tensors="pt" ).to("cuda") with torch.no_grad(): outputs = model(**inputs) mask = processor.image_processor.post_process_masks( outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu() )[0][0][0].numpy() ``` 이제 아래와 같이, 벌 주위의 바운딩 박스를 시각화할 수 있습니다. ```python import matplotlib.patches as patches fig, ax = plt.subplots() ax.imshow(image) rectangle = patches.Rectangle((2350, 1600), 500, 500, linewidth=2, edgecolor='r', facecolor='none') ax.add_patch(rectangle) ax.axis("off") plt.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/bbox.png" alt="Visualized Bbox"/> </div> 아래에서 추론 결과를 확인할 수 있습니다. 
```python fig, ax = plt.subplots() ax.imshow(image) ax.imshow(mask, cmap='viridis', alpha=0.4) ax.axis("off") plt.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/box_inference.png" alt="Visualized Inference"/> </div>
transformers/docs/source/ko/tasks/mask_generation.md/0
{ "file_path": "transformers/docs/source/ko/tasks/mask_generation.md", "repo_id": "transformers", "token_count": 5655 }
324
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 🤗 Transformers로 작업을 해결하는 방법[[how-transformers-solve-tasks]] [🤗 Transformers로 할 수 있는 작업](task_summary)에서 자연어 처리(NLP), 음성 및 오디오, 컴퓨터 비전 작업 등의 중요한 응용을 배웠습니다. 이 페이지에서는 모델이 이러한 작업을 어떻게 해결하는지 자세히 살펴보고 내부에서 어떤 일이 일어나는지 설명합니다. 주어진 작업을 해결하는 많은 방법이 있으며, 일부 모델은 특정 기술을 구현하거나 심지어 새로운 방식으로 작업에 접근할 수도 있지만, Transformer 모델의 경우 일반적인 아이디어는 동일합니다. 유연한 아키텍처 덕분에 대부분의 모델은 인코더, 디코더 또는 인코더-디코더 구조의 변형입니다. Transformer 모델뿐만 아니라 우리의 라이브러리에는 오늘날 컴퓨터 비전 작업에 사용되는 몇 가지 합성곱 신경망(CNNs)도 있습니다. 또한, 우리는 현대 CNN의 작동 방식에 대해 설명할 것입니다. 작업이 어떻게 해결되는지 설명하기 위해, 유용한 예측을 출력하고자 모델 내부에서 어떤 일이 일어나는지 살펴봅니다. - 오디오 분류 및 자동 음성 인식(ASR)을 위한 [Wav2Vec2](model_doc/wav2vec2) - 이미지 분류를 위한 [Vision Transformer (ViT)](model_doc/vit) 및 [ConvNeXT](model_doc/convnext) - 객체 탐지를 위한 [DETR](model_doc/detr) - 이미지 분할을 위한 [Mask2Former](model_doc/mask2former) - 깊이 추정을 위한 [GLPN](model_doc/glpn) - 인코더를 사용하는 텍스트 분류, 토큰 분류 및 질의응답과 같은 NLP 작업을 위한 [BERT](model_doc/bert) - 디코더를 사용하는 텍스트 생성과 같은 NLP 작업을 위한 [GPT2](model_doc/gpt2) - 인코더-디코더를 사용하는 요약 및 번역과 같은 NLP 작업을 위한 [BART](model_doc/bart) <Tip> 더 나아가기 전에, 기존 Transformer 아키텍처에 대한 기본적인 지식을 숙지하는 것이 좋습니다. 인코더, 디코더 및 어텐션의 작동 방식을 알면 다양한 Transformer 모델이 어떻게 작동하는지 이해하는 데 도움이 됩니다. 시작 단계거나 복습이 필요한 경우, 더 많은 정보를 위해 [코스](https://huggingface.co/course/chapter1/4?fw=pt)를 확인하세요! 
</Tip> ## 음성 및 오디오[[speech-and-audio]] [Wav2Vec2](model_doc/wav2vec2)는 레이블이 지정되지 않은 음성 데이터에 대해 사전훈련된 모델로, 오디오 분류 및 자동 음성 인식을 위해 레이블이 지정된 데이터로 미세 조정합니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/wav2vec2_architecture.png"/> </div> 이 모델에는 4가지 주요 구성 요소가 있습니다: 1. *특징 인코더(feature encoder)*는 원시 오디오 파형(raw audio waveform)을 가져와서 제로 평균 및 단위 분산으로 표준화하고, 각각 20ms 길이의 특징 벡터의 시퀀스로 변환합니다. 2. 오디오 파형은 본질적으로 연속적이기 때문에, 텍스트 시퀀스를 단어로 나누는 것과 같이 분할할 수 없습니다. 그래서 *양자화 모듈(quantization module)*로 전달되는 특징 벡터는 이산형 음성 단위를 학습하기 위한 것입니다. 음성 단위는 *코드북(codebook)*(어휘집이라고 생각할 수 있습니다)이라는 코드단어(codewords) 콜렉션에서 선택됩니다. 코드북에서 연속적인 오디오 입력을 가장 잘 나타내는 벡터 또는 음성 단위가 선택되어 모델을 통과합니다. 3. 특징 벡터의 절반은 무작위로 마스크가 적용되며, 마스크된 특징 벡터는 *상대적 위치 임베딩*을 추가하는 Transformer 인코더인 *문맥 네트워크(context network)*로 전달됩니다. 4. 문맥 네트워크의 사전훈련 목표는 *대조적 작업(contrastive task)*입니다. 모델은 잘못된 예측 시퀀스에서 마스크된 예측의 실제 양자화된 음성 표현을 예측하며, 모델이 가장 유사한 컨텍스트 벡터와 양자화된 음성 단위(타겟 레이블)를 찾도록 권장합니다. 이제 wav2vec2가 사전훈련되었으므로, 오디오 분류 또는 자동 음성 인식을 위해 데이터에 맞춰 미세 조정할 수 있습니다! ### 오디오 분류[[audio-classification]] 사전훈련된 모델을 오디오 분류에 사용하려면, 기본 Wav2Vec2 모델 상단에 시퀀스 분류 헤드를 추가하면 됩니다. 분류 헤드는 인코더의 은닉 상태(hidden states)를 받는 선형 레이어입니다. 은닉 상태는 각각 길이가 다른 오디오 프레임에서 학습된 특징을 나타냅니다. 고정 길이의 벡터 하나를 만들기 위해, 은닉 상태는 먼저 풀링되고, 클래스 레이블에 대한 로짓으로 변환됩니다. 가장 가능성이 높은 클래스를 찾기 위해 로짓과 타겟 사이의 교차 엔트로피 손실이 계산됩니다. 오디오 분류에 직접 도전할 준비가 되셨나요? 완전한 [오디오 분류 가이드](tasks/audio_classification)를 확인하여 Wav2Vec2를 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 자동 음성 인식[[automatic-speech-recognition]] 사전훈련된 모델을 자동 음성 인식에 사용하려면, [연결주의적 시간 분류(CTC, Connectionist Temporal Classification)](glossary#connectionist-temporal-classification-ctc)를 위해 기본 Wav2Vec2 모델 상단에 언어 모델링 헤드를 추가합니다. 언어 모델링 헤드는 인코더의 은닉 상태를 받아서 로짓으로 변환합니다. 각 로짓은 토큰 클래스(토큰 수는 작업의 어휘에서 나타납니다)를 나타냅니다. CTC 손실은 텍스트로 디코딩된 토큰에서 가장 가능성이 높은 토큰 시퀀스를 찾기 위해 로짓과 타겟 사이에서 계산됩니다. 자동 음성 인식에 직접 도전할 준비가 되셨나요? 완전한 [자동 음성 인식 가이드](tasks/asr)를 확인하여 Wav2Vec2를 미세 조정하고 추론에 사용하는 방법을 학습하세요! 
## 컴퓨터 비전[[computer-vision]] 컴퓨터 비전 작업에 접근하는 2가지 방법이 있습니다: 1. 이미지를 패치 시퀀스로 분리하고 Transformer로 병렬 처리합니다. 2. [ConvNeXT](model_doc/convnext)와 같은 현대 CNN을 사용합니다. 이는 합성곱 레이어를 기반으로 하지만 현대 네트워크 설계를 적용합니다. <Tip> 세 번째 방법은 Transformer와 합성곱(예를 들어, [Convolutional Vision Transformer](model_doc/cvt) 또는 [LeViT](model_doc/levit))을 결합하는 것입니다. 우리는 살펴볼 두 가지 방법만 결합하기 때문에 여기서 이 방법을 다루지 않습니다. </Tip> ViT와 ConvNeXT는 일반적으로 이미지 분류에서 사용되지만, 물체 감지, 분할, 깊이 추정과 같은 다른 비전 작업에는 각각 DETR, Mask2Former, GLPN이 더 적합하므로 이러한 모델을 살펴보겠습니다. ### 이미지 분류[[image-classification]] ViT와 ConvNeXT 모두 이미지 분류에 사용될 수 있지만, ViT는 어텐션 메커니즘을, ConvNeXT는 합성곱을 사용하는 것이 주된 차이입니다. #### Transformer[[transformer]] [ViT](model_doc/vit)은 합성곱을 전적으로 순수 Transformer 아키텍처로 대체합니다. 기존 Transformer에 익숙하다면, ViT를 이해하는 방법의 대부분을 이미 파악했다고 볼 수 있습니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vit_architecture.jpg"/> </div> ViT가 도입한 주요 변경 사항은 이미지가 Transformer로 어떻게 전달되는지에 있습니다: 1. 이미지는 서로 중첩되지 않는 정사각형 패치로 분할되고, 각 패치는 벡터 또는 *패치 임베딩(patch embedding)*으로 변환됩니다. 패치 임베딩은 적절한 입력 차원을 만드는 2D 합성곱 계층에서 생성됩니다(기본 Transformer의 경우 각 패치의 임베딩마다 768개의 값이 필요합니다). 224x224 픽셀 이미지가 있다면, 16x16 이미지 패치 196개로 분할할 수 있습니다. 텍스트가 단어로 토큰화되는 것처럼, 이미지도 패치 시퀀스로 "토큰화"됩니다. 2. *학습 가능한 임베딩(learnable embedding)*(특수한 `[CLS]` 토큰)이 BERT와 같이 패치 임베딩의 시작 부분에 추가됩니다. `[CLS]` 토큰의 마지막 은닉 상태는 부착된 분류 헤드의 입력으로 사용되고, 다른 출력은 무시됩니다. 이 토큰은 모델이 이미지의 표현을 인코딩하는 방법을 학습하는 데 도움이 됩니다. 3. 패치와 학습 가능한 임베딩에 마지막으로 추가할 것은 *위치 임베딩*입니다. 왜냐하면 모델은 이미지 패치의 순서를 모르기 때문입니다. 위치 임베딩도 학습 가능하며, 패치 임베딩과 동일한 크기를 가집니다. 최종적으로, 모든 임베딩이 Transformer 인코더에 전달됩니다. 4. `[CLS]` 토큰을 포함한 출력은 다층 퍼셉트론 헤드(MLP)에 전달됩니다. ViT의 사전훈련 목표는 단순히 분류입니다. 다른 분류 헤드와 같이, MLP 헤드는 출력을 클래스 레이블에 대해 로짓으로 변환하고 교차 엔트로피 손실을 계산하여 가장 가능성이 높은 클래스를 찾습니다. 이미지 분류에 직접 도전할 준비가 되셨나요? 완전한 [이미지 분류 가이드](tasks/image_classification)를 확인하여 ViT를 미세 조정하고 추론에 사용하는 방법을 학습하세요! #### CNN[[cnn]] <Tip> 이 섹션에서는 합성곱에 대해 간략하게 설명합니다. 그러나 이미지의 모양과 크기가 어떻게 변화하는지에 대한 사전 이해가 있다면 도움이 될 것입니다. 
합성곱에 익숙하지 않은 경우, fastai book의 [합성곱 신경망 챕터](https://github.com/fastai/fastbook/blob/master/13_convolutions.ipynb)를 확인하세요! </Tip> [ConvNeXT](model_doc/convnext)는 성능을 높이기 위해 새로운 현대 네트워크 설계를 적용한 CNN 구조입니다. 그러나 합성곱은 여전히 모델의 핵심입니다. 높은 수준의 관점에서 볼 때, [합성곱](glossary#convolution)은 작은 행렬(*커널*)에 이미지 픽셀의 작은 윈도우를 곱하는 연산입니다. 이는 특정 텍스쳐(texture)이나 선의 곡률과 같은 일부 특징을 계산합니다. 그러고 다음 픽셀 윈도우로 넘어가는데, 여기서 합성곱이 이동하는 거리를 *보폭(stride)*이라고 합니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convolution.gif"/> </div> <small>패딩이나 보폭이 없는 기본 합성곱, <a href="https://arxiv.org/abs/1603.07285">딥러닝을 위한 합성곱 연산 가이드</a></small> 이 출력을 다른 합성곱 레이어에 전달할 수 있으며, 각 연속적인 레이어를 통해 네트워크는 핫도그나 로켓과 같이 더 복잡하고 추상적인 것을 학습합니다. 합성곱 레이어 사이에 풀링 레이어를 추가하여 차원을 줄이고 특징의 위치 변화에 대해 모델을 더 견고하게 만드는 것이 일반적입니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.png"/> </div> ConvNeXT는 CNN을 5가지 방식으로 현대화합니다: 1. 각 단계의 블록 수를 변경하고 더 큰 보폭과 그에 대응하는 커널 크기로 이미지를 "패치화(patchify)"합니다. 겹치지 않는 슬라이딩 윈도우는 ViT가 이미지를 패치로 분할하는 방법과 유사하게 이 패치화 전략을 만듭니다. 2. *병목(bottleneck)* 레이어는 채널 수를 줄였다가 다시 복원합니다. 왜냐하면 1x1 합성곱을 수행하는 것이 더 빠르고, 깊이를 늘릴 수 있기 때문입니다. 역 병목(inverted bottlenect)은 채널 수를 확장하고 축소함으로써 그 반대로 수행하므로, 메모리 효율이 더 높습니다. 3. 병목 레이어의 일반적인 3x3 합성곱 레이어를 각 입력 채널에 개별적으로 합성곱을 적용한 다음 마지막에 쌓는 *깊이별 합성곱(depthwise convolution)*으로 대체합니다. 이는 네트워크 폭이 넓혀 성능이 향상됩니다. 4. ViT는 어텐션 메커니즘 덕분에 한 번에 더 많은 이미지를 볼 수 있는 전역 수신 필드를 가지고 있습니다. ConvNeXT는 커널 크기를 7x7로 늘려 이 효과를 재현하려고 시도합니다. 5. 또한 ConvNeXT는 Transformer 모델을 모방하는 몇 가지 레이어 설계를 변경합니다. 활성화 및 정규화 레이어가 더 적고, 활성화 함수가 ReLU 대신 GELU로 전환되고, BatchNorm 대신 LayerNorm을 사용합니다. 합성곱 블록의 출력은 분류 헤드로 전달되며, 분류 헤드는 출력을 로짓으로 변환하고 교차 엔트로피 손실을 계산하여 가장 가능성이 높은 레이블을 찾습니다. ### 객체 탐지[[object-detection]] [DETR](model_doc/detr), *DEtection TRansformer*는 CNN과 Transformer 인코더-디코더를 결합한 종단간(end-to-end) 객체 탐지 모델입니다. 
<div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/detr_architecture.png"/> </div> 1. 사전훈련된 CNN *백본(backbone)*은 픽셀 값으로 나타낸 이미지를 가져와 저해상도 특징 맵을 만듭니다. 특징 맵에 대해 1x1 합성곱을 적용하여 차원을 줄이고, 고수준 이미지 표현을 가진 새로운 특징 맵을 생성합니다. Transformer는 시퀀스 모델이기 때문에 특징 맵을 위치 임베딩과 결합된 특징 벡터의 시퀀스로 평탄화합니다. 2. 특징 벡터는 어텐션 레이어를 사용하여 이미지 표현을 학습하는 인코더에 전달됩니다. 다음으로, 인코더의 은닉 상태는 디코더에서 *객체 쿼리*와 결합됩니다. 객체 쿼리는 이미지의 다른 영역에 초점을 맞춘 학습된 임베딩으로 학습되고, 각 어텐션 레이어를 진행하면서 갱신됩니다. 디코더의 은닉 상태는 각 객체 쿼리에 대한 바운딩 박스 좌표와 클래스 레이블을 예측하는 순방향 네트워크에 전달되며, 객체가 없는 경우 `no object`가 출력됩니다. DETR은 각 객체 쿼리를 병렬로 디코딩하여 *N* 개의 최종 예측을 출력합니다. 여기서 *N*은 쿼리 수입니다. 한 번에 하나의 요소를 예측하는 일반적인 자기회귀 모델과 달리, 객체 탐지는 한 번에 *N* 개의 예측을 수행하는 집합 예측 작업(`바운딩 박스`, `클래스 레이블`)입니다. 3. DETR은 훈련 중 *이분 매칭 손실(bipartite matching loss)*을 사용하여 고정된 수의 예측과 고정된 실제 정답 레이블(ground truth labels) 세트를 비교합니다. *N*개의 레이블 세트에 실제 정답 레이블보다 적은 경우, `no object` 클래스로 패딩됩니다. 이 손실 함수는 DETR이 예측과 실제 정답 레이블 간 1:1 대응을 찾도록 권장합니다. 바운딩 박스 또는 클래스 레이블 중 하나라도 잘못된 경우, 손실이 발생합니다. 마찬가지로, 존재하지 않는 객체를 예측하는 경우, 패널티를 받습니다. 이로 인해 DETR은 이미지에서 눈에 잘 띄는 물체 하나에 집중하는 대신, 다른 객체를 찾도록 권장됩니다. 객체 탐지 헤드가 DETR 상단에 추가되어 클래스 레이블과 바운딩 박스의 좌표를 찾습니다. 객체 탐지 헤드에는 두 가지 구성 요소가 있습니다: 디코더 은닉 상태를 클래스 레이블의 로짓으로 변환하는 선형 레이어 및 바운딩 박스를 예측하는 MLP 객체 탐지에 직접 도전할 준비가 되셨나요? 완전한 [객체 탐지 가이드](tasks/object_detection)를 확인하여 DETR을 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 이미지 분할[[image-segmentation]] [Mask2Former](model_doc/mask2former)는 모든 유형의 이미지 분할 작업을 해결하는 범용 아키텍처입니다. 전통적인 분할 모델은 일반적으로 시멘틱(semantic) 또는 파놉틱(panoptic) 분할과 같은 이미지 분할의 특정 하위 작업에 맞춰 조정됩니다. Mask2Former는 모든 작업을 *마스크 분류* 문제로 구성합니다. 마스크 분류는 픽셀을 *N*개 세그먼트로 그룹화하고, 주어진 이미지에 대해 *N*개의 마스크와 그에 대응하는 클래스 레이블을 예측합니다. 이 섹션에서 Mask2Former의 작동 방법을 설명한 다음, 마지막에 SegFormer를 미세 조정해볼 수 있습니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/mask2former_architecture.png"/> </div> Mask2Former에는 3가지 주요 구성 요소가 있습니다: 1. 
[Swin](model_doc/swin) 백본이 이미지를 받아 3개의 연속된 3x3 합성곱에서 저해상도 이미지 특징 맵을 생성합니다. 2. 특징 맵은 *픽셀 디코더*에 전달됩니다. 이 디코더는 저해상도 특징을 고해상도 픽셀 임베딩으로 점진적으로 업샘플링합니다. 픽셀 디코더는 실제로 원본 이미지의 1/32, 1/16, 1/8 해상도의 다중 스케일 특징(저해상도 및 고해상도 특징 모두 포함)을 생성합니다. 3. 이러한 서로 다른 크기의 특징 맵은 고해상도 특징에서 작은 객체를 포착하기 위해 한 번에 하나의 Transformer 디코더 레이어에 연속적으로 공급됩니다. Mask2Former의 핵심은 디코더의 *마스크 어텐션* 메커니즘입니다. 전체 이미지를 참조할 수 있는 크로스 어텐션(cross-attention)과 달리, 마스크 어텐션은 이미지의 특정 영역에만 집중합니다. 이는 이미지의 지역적 특징만으로 모델이 충분히 학습할 수 있기 때문에 더 빠르고 성능이 우수합니다. 4. [DETR](tasks_explained#object-detection)과 같이, Mask2Former는 학습된 객체 쿼리를 사용하고 이를 픽셀 디코더에서의 이미지 특징과 결합하여 예측 집합(`클래스 레이블`, `마스크 예측`)을 생성합니다. 디코더의 은닉 상태는 선형 레이어로 전달되어 클래스 레이블에 대한 로짓으로 변환됩니다. 로짓과 클래스 레이블 사이의 교차 엔트로피 손실을 계산하여 가장 가능성이 높은 것을 찾습니다. 마스크 예측은 픽셀 임베딩과 최종 디코더 은닉 상태를 결합하여 생성됩니다. 시그모이드 교차 엔트로피 및 Dice 손실은 로짓과 실제 정답 마스크(ground truth mask) 사이에서 계산되어 가장 가능성이 높은 마스크를 찾습니다. 이미지 분할에 직접 도전할 준비가 되셨나요? 완전한 [이미지 분할 가이드](tasks/semantic_segmentation)를 확인하여 SegFormer를 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 깊이 추정[[depth-estimation]] [GLPN](model_doc/glpn), *Global-Local Path Network*는 [SegFormer](model_doc/segformer) 인코더와 경량 디코더를 결합한 깊이 추정을 위한 Transformer입니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/glpn_architecture.jpg"/> </div> 1. ViT와 같이, 이미지는 패치 시퀀스로 분할되지만, 이미지 패치가 더 작다는 점이 다릅니다. 이는 세그멘테이션이나 깊이 추정과 같은 밀도 예측 작업에 더 적합합니다. 이미지 패치는 패치 임베딩으로 변환되어(패치 임베딩이 생성되는 방법은 [이미지 분류](#image-classification) 섹션을 참조하세요), 인코더로 전달됩니다. 2. 인코더는 패치 임베딩을 받아, 여러 인코더 블록에 전달합니다. 각 블록은 어텐션 및 Mix-FFN 레이어로 구성됩니다. 후자의 목적은 위치 정보를 제공하는 것입니다. 각 인코더 블록의 끝에는 계층적 표현을 생성하기 위한 *패치 병합(patch merging)* 레이어가 있습니다. 각 인접한 패치 그룹의 특징은 연결되고, 연결된 특징에 선형 레이어가 적용되어 패치 수를 1/4의 해상도로 줄입니다. 이는 다음 인코더 블록의 입력이 되며, 이러한 전체 프로세스는 1/8, 1/16, 1/32 해상도의 이미지 특징을 가질 때까지 반복됩니다. 3. 경량 디코더는 인코더에서 마지막 특징 맵(1/32 크기)을 가져와 1/16 크기로 업샘플링합니다. 여기서, 특징은 *선택적 특징 융합(SFF, Selective Feature Fusion)* 모듈로 전달됩니다. 이 모듈은 각 특징에 대해 어텐션 맵에서 로컬 및 전역 특징을 선택하고 결합한 다음, 1/8로 업샘플링합니다. 
이 프로세스는 디코딩된 특성이 원본 이미지와 동일한 크기가 될 때까지 반복됩니다. 출력은 두 개의 합성곱 레이어를 거친 다음, 시그모이드 활성화가 적용되어 각 픽셀의 깊이를 예측합니다. ## 자연어처리[[natural-language-processing]] Transformer는 초기에 기계 번역을 위해 설계되었고, 그 이후로는 사실상 모든 NLP 작업을 해결하기 위한 기본 아키텍처가 되었습니다. 어떤 작업은 Transformer의 인코더 구조에 적합하며, 다른 작업은 디코더에 더 적합합니다. 또 다른 작업은 Transformer의 인코더-디코더 구조를 모두 활용합니다. ### 텍스트 분류[[text-classification]] [BERT](model_doc/bert)는 인코더 전용 모델이며, 텍스트의 풍부한 표현을 학습하기 위해 양방향의 단어에 주목함으로써 심층 양방향성(deep bidirectionality)을 효과적으로 구현한 최초의 모델입니다. 1. BERT는 [WordPiece](tokenizer_summary#wordpiece) 토큰화를 사용하여 문장의 토큰 임베딩을 생성합니다. 단일 문장과 한 쌍의 문장을 구분하기 위해 특수한 `[SEP]` 토큰이 추가됩니다. 모든 텍스트 시퀀스의 시작 부분에는 특수한 `[CLS]` 토큰이 추가됩니다. `[CLS]` 토큰이 있는 최종 출력은 분류 작업을 위한 분류 헤드로 입력에 사용됩니다. BERT는 또한 한 쌍의 문장에서 각 토큰이 첫 번째 문장인지 두 번째 문장에 속하는지 나타내는 세그먼트 임베딩(segment embedding)을 추가합니다. 2. BERT는 마스크드 언어 모델링과 다음 문장 예측, 두 가지 목적으로 사전훈련됩니다. 마스크드 언어 모델링에서는 입력 토큰의 일부가 무작위로 마스킹되고, 모델은 이를 예측해야 합니다. 이는 모델이 모든 단어를 보고 다음 단어를 "예측"할 수 있는 양방향성 문제를 해결합니다. 예측된 마스크 토큰의 최종 은닉 상태는 어휘에 대한 소프트맥스가 있는 순방향 네트워크로 전달되어 마스크된 단어를 예측합니다. 두 번째 사전훈련 대상은 다음 문장 예측입니다. 모델은 문장 B가 문장 A 다음에 오는지 예측해야 합니다. 문장 B가 다음 문장인 경우와 무작위 문장인 경우 각각 50%의 확률로 발생합니다. 다음 문장인지 아닌지에 대한 예측은 두 개의 클래스(`IsNext` 및 `NotNext`)에 대한 소프트맥스가 있는 순방향 네트워크로 전달됩니다. 3. 입력 임베딩은 여러 인코더 레이어를 거쳐서 최종 은닉 상태를 출력합니다. 사전훈련된 모델을 텍스트 분류에 사용하려면, 기본 BERT 모델 상단에 시퀀스 분류 헤드를 추가합니다. 시퀀스 분류 헤드는 최종 은닉 상태를 받는 선형 레이어이며, 로짓으로 변환하기 위해 선형 변환을 수행합니다. 교차 엔트로피 손실은 로짓과 타겟 간에 계산되어 가장 가능성이 높은 레이블을 찾습니다. 텍스트 분류에 직접 도전할 준비가 되셨나요? 완전한 [텍스트 분류 가이드](tasks/sequence_classification)를 확인하여 DistilBERT를 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 토큰 분류[[token-classification]] 개체명 인식(Named Entity Recognition, NER)과 같은 토큰 분류 작업에 BERT를 사용하려면, 기본 BERT 모델 상단에 토큰 분류 헤드를 추가합니다. 토큰 분류 헤드는 최종 은닉 상태를 받는 선형 레이어이며, 로짓으로 변환하기 위해 선형 변환을 수행합니다. 교차 엔트로피 손실은 로짓과 각 토큰 간에 계산되어 가장 가능성이 높은 레이블을 찾습니다. 토큰 분류에 직접 도전할 준비가 되셨나요? 완전한 [토큰 분류 가이드](tasks/token_classification)를 확인하여 DistilBERT를 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 질의응답[[question-answering]] 질의응답에 BERT를 사용하려면, 기본 BERT 모델 위에 스팬(span) 분류 헤드를 추가합니다. 
이 선형 레이어는 최종 은닉 상태를 받고, 답변에 대응하는 `스팬`의 시작과 끝 로짓을 계산하기 위해 선형 변환을 수행합니다.
<div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bart_architecture.png"/> </div> 1. BART의 인코더 아키텍처는 BERT와 매우 유사하며 텍스트의 토큰 및 위치 임베딩을 받습니다. BART는 입력을 변형시키고 디코더로 재구성하여 사전훈련됩니다. 특정 변형 기법이 있는 다른 인코더와는 달리, BART는 모든 유형의 변형을 적용할 수 있습니다. 그러나 *text infilling* 변형 기법이 가장 잘 작동합니다. Text Infiling에서는 여러 텍스트 스팬을 **단일** [`mask`] 토큰으로 대체합니다. 이는 모델이 마스크된 토큰을 예측해야 하고, 모델에 누락된 토큰의 수를 예측하도록 가르치기 때문에 중요합니다. 입력 임베딩과 마스크된 스팬이 인코더를 거쳐 최종 은닉 상태를 출력하지만, BERT와 달리 BART는 마지막에 단어를 예측하는 순방향 네트워크를 추가하지 않습니다. 2. 인코더의 출력은 디코더로 전달되며, 디코더는 인코더의 출력에서 마스크 토큰과 변형되지 않은 토큰을 예측해야 합니다. 이는 디코더가 원본 텍스트를 복원하는 데 도움이 되는 추가적인 문맥을 얻도록 합니다. 디코더의 출력은 언어 모델링 헤드에 전달되며, 언어 모델링 헤드는 은닉 상태를 로짓으로 선형 변환을 수행합니다. 교차 엔트로피 손실은 로짓과 토큰이 오른쪽으로 이동된 레이블 간에 계산됩니다. 요약에 직접 도전할 준비가 되셨나요? 완전한 [요약 가이드](tasks/summarization)를 확인하여 T5를 미세 조정하고 추론에 사용하는 방법을 학습하세요! <Tip> 텍스트 생성에 대한 자세한 내용은 [텍스트 생성 전략](generation_strategies) 가이드를 확인하세요! </Tip> ### 번역[[translation]] 번역은 시퀀스-투-시퀀스 작업의 또 다른 예로, [BART](model_doc/bart) 또는 [T5](model_doc/t5)와 같은 인코더-디코더 모델을 사용할 수 있습니다. 이 섹션에서 BART의 작동 방법을 설명한 다음, 마지막에 T5를 미세 조정해볼 수 있습니다. BART는 원천 언어를 타겟 언어로 디코딩할 수 있는 입력에 매핑하기 위해 무작위로 초기화된 별도의 인코더를 추가하여 번역에 적용합니다. 이 새로운 인코더의 임베딩은 원본 단어 임베딩 대신 사전훈련된 인코더로 전달됩니다. 원천 인코더는 모델 출력의 교차 엔트로피 손실로부터 원천 인코더, 위치 임베딩, 입력 임베딩을 갱신하여 훈련됩니다. 첫 번째 단계에서는 모델 파라미터가 고정되고, 두 번째 단계에서는 모든 모델 파라미터가 함께 훈련됩니다. BART는 이후 번역을 위해 다양한 언어로 사전훈련된 다국어 버전의 mBART로 확장되었습니다. 번역에 직접 도전할 준비가 되셨나요? 완전한 [번역 가이드](tasks/summarization)를 확인하여 T5를 미세 조정하고 추론에 사용하는 방법을 학습하세요! <Tip> 텍스트 생성에 대한 자세한 내용은 [텍스트 생성 전략](generation_strategies) 가이드를 확인하세요! </Tip>
transformers/docs/source/ko/tasks_explained.md/0
{ "file_path": "transformers/docs/source/ko/tasks_explained.md", "repo_id": "transformers", "token_count": 25797 }
325
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Criar uma arquitetura customizada Uma [`AutoClass`](model_doc/auto) automaticamente infere a arquitetura do modelo e baixa configurações e pesos pré-treinados. Geralmente, nós recomendamos usar uma `AutoClass` para produzir um código independente de checkpoints. Mas usuários que querem mais contole sobre parâmetros específicos do modelo pode criar um modelo customizado 🤗 Transformers a partir de algumas classes bases. Isso pode ser particulamente útil para alguém que está interessado em estudar, treinar ou fazer experimentos com um modelo 🤗 Transformers. Nesse tutorial, será explicado como criar um modelo customizado sem uma `AutoClass`. Aprenda como: - Carregar e customizar a configuração de um modelo. - Criar a arquitetura de um modelo. - Criar um tokenizer rápido e devagar para textos. - Criar extrator de features para tarefas envolvendo audio e imagem. - Criar um processador para tarefas multimodais. ## configuration A [configuration](main_classes/configuration) refere-se a atributos específicos de um modelo. Cada configuração de modelo tem atributos diferentes; por exemplo, todos modelo de PLN possuem os atributos `hidden_size`, `num_attention_heads`, `num_hidden_layers` e `vocab_size` em comum. 
Esses atributos especificam o número de 'attention heads' ou 'hidden layers' para construir um modelo.
```py >>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) >>> print(my_config) DistilBertConfig { "activation": "relu", "attention_dropout": 0.4, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` Atributos de um modelo pré-treinado podem ser modificados na função [`~PretrainedConfig.from_pretrained`]: ```py >>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4) ``` Uma vez que você está satisfeito com as configurações do seu modelo, você consegue salvar elas com [`~PretrainedConfig.save_pretrained`]. Seu arquivo de configurações está salvo como um arquivo JSON no diretório especificado: ```py >>> my_config.save_pretrained(save_directory="./your_model_save_path") ``` Para reusar o arquivo de configurações, carregue com [`~PretrainedConfig.from_pretrained`]: ```py >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") ``` <Tip> Você pode também salvar seu arquivo de configurações como um dicionário ou até mesmo com a diferença entre as seus atributos de configuração customizados e os atributos de configuração padrões! Olhe a documentação [configuration](main_classes/configuration) para mais detalhes. </Tip> ## Modelo O próximo passo é criar um [model](main_classes/models). O modelo - também vagamente referido como arquitetura - define o que cada camada está fazendo e quais operações estão acontecendo. Atributos como `num_hidden_layers` das configurações são utilizados para definir a arquitetura. Todo modelo compartilha a classe base [`PreTrainedModel`] e alguns métodos em comum como redimensionar o tamanho dos embeddings de entrada e podar as 'self-attention heads'. 
Além disso, todos os modelos também são subclasses de [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) ou [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html). Isso significa que os modelos são compatíveis com cada respectivo uso de framework. <frameworkcontent> <pt> Carregar seus atributos de configuração customizados em um modelo: ```py >>> from transformers import DistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> model = DistilBertModel(my_config) ``` Isso cria um modelo com valores aleatórios ao invés de pré-treinar os pesos. Você não irá conseguir usar usar esse modelo para nada útil ainda, até você treinar ele. Treino é um processo caro e demorado. Geralmente é melhor utilizar um modelo pré-treinado para obter melhores resultados mais rápido, enquanto usa apenas uma fração dos recursos necessários para treinar. Criar um modelo pré-treinado com [`~PreTrainedModel.from_pretrained`]: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` Quando você carregar os pesos pré-treinados, a configuração padrão do modelo é automaticamente carregada se o modelo é provido pelo 🤗 Transformers. No entanto, você ainda consegue mudar - alguns ou todos - os atributos padrões de configuração do modelo com os seus próprio atributos, se você preferir: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </pt> <tf> Carregar os seus próprios atributos padrões de contiguração no modelo: ```py >>> from transformers import TFDistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> tf_model = TFDistilBertModel(my_config) ``` Isso cria um modelo com valores aleatórios ao invés de pré-treinar os pesos. 
Você não irá conseguir usar usar esse modelo para nada útil ainda, até você treinar ele. Treino é um processo caro e demorado. Geralmente é melhor utilizar um modelo pré-treinado para obter melhores resultados mais rápido, enquanto usa apenas uma fração dos recursos necessários para treinar. Criar um modelo pré-treinado com [`~TFPreTrainedModel.from_pretrained`]: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` Quando você carregar os pesos pré-treinados, a configuração padrão do modelo é automaticamente carregada se o modelo é provido pelo 🤗 Transformers. No entanto, você ainda consegue mudar - alguns ou todos - os atributos padrões de configuração do modelo com os seus próprio atributos, se você preferir: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </tf> </frameworkcontent> ### Heads do modelo Neste ponto, você tem um modelo básico do DistilBERT que gera os *estados ocultos*. Os estados ocultos são passados como entrada para a head do moelo para produzir a saída final. 🤗 Transformers fornece uma head de modelo diferente para cada tarefa desde que o modelo suporte essa tarefa (por exemplo, você não consegue utilizar o modelo DistilBERT para uma tarefa de 'sequence-to-sequence' como tradução). <frameworkcontent> <pt> Por exemplo, [`DistilBertForSequenceClassification`] é um modelo DistilBERT base com uma head de classificação de sequência. A head de calssificação de sequência é uma camada linear no topo das saídas agrupadas. ```py >>> from transformers import DistilBertForSequenceClassification >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder questões, você usaria a head do modelo [`DistilBertForQuestionAnswering`]. 
A head de responder questões é similar com a de classificação de sequências exceto o fato de que ela é uma camada no topo dos estados das saídas ocultas. ```py >>> from transformers import DistilBertForQuestionAnswering >>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </pt> <tf> Por exemplo, [`TFDistilBertForSequenceClassification`] é um modelo DistilBERT base com uma head de classificação de sequência. A head de calssificação de sequência é uma camada linear no topo das saídas agrupadas. ```py >>> from transformers import TFDistilBertForSequenceClassification >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder questões, você usaria a head do modelo [`TFDistilBertForQuestionAnswering`]. A head de responder questões é similar com a de classificação de sequências exceto o fato de que ela é uma camada no topo dos estados das saídas ocultas. ```py >>> from transformers import TFDistilBertForQuestionAnswering >>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </tf> </frameworkcontent> ## Tokenizer A útlima classe base que você precisa antes de usar um modelo para dados textuais é a [tokenizer](main_classes/tokenizer) para converter textos originais para tensores. Existem dois tipos de tokenizers que você pode usar com 🤗 Transformers: - [`PreTrainedTokenizer`]: uma implementação em Python de um tokenizer. - [`PreTrainedTokenizerFast`]: um tokenizer da nossa biblioteca [🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/) baseada em Rust. Esse tipo de tokenizer é significantemente mais rapido - especialmente durante tokenization de codificação - devido a implementação em Rust. 
O tokenizer rápido também oferece métodos adicionais como *offset mapping*, que mapeia tokens para suas palavras ou caracteres originais. Os dois tokenizers suportam métodos comuns como os de codificar e decodificar, adicionar novos tokens e gerenciar tokens especiais. <Tip warning={true}> Nem todo modelo suporta um 'fast tokenizer'. Dê uma olhada nesta [tabela](index#supported-frameworks) para checar se um modelo suporta 'fast tokenizer'. </Tip> Se você treinou seu próprio tokenizer, você pode criar um a partir do seu arquivo *vocabulary*: ```py >>> from transformers import DistilBertTokenizer >>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left") ``` É importante lembrar que o vocabulário de um tokenizer customizado será diferente de um vocabulário gerado pelo tokenizer de um modelo pré-treinado. Você precisa usar o vocabulário de um modelo pré-treinado se você estiver usando um modelo pré-treinado, caso contrário as entradas não farão sentido. Crie um tokenizer com o vocabulário de um modelo pré-treinado com a classe [`DistilBertTokenizer`]: ```py >>> from transformers import DistilBertTokenizer >>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` Crie um 'fast tokenizer' com a classe [`DistilBertTokenizerFast`]: ```py >>> from transformers import DistilBertTokenizerFast >>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> Por padrão, [`AutoTokenizer`] tentará carregar um 'fast tokenizer'. Você pode desabilitar esse comportamento colocando `use_fast=False` no `from_pretrained`. </Tip> ## Extrator de features Um extrator de features processa entradas de imagem ou áudio. 
Ele herda da classe base [`~feature_extraction_utils.FeatureExtractionMixin`], e pode também herdar da classe [`ImageFeatureExtractionMixin`] para processamento de features de imagem ou da classe [`SequenceFeatureExtractor`] para processamento de entradas de áudio. Dependendo se você está trabalhando em uma tarefa de áudio ou de visão, crie um extrator de features associado ao modelo que você está usando. Por exemplo, crie um [`ViTFeatureExtractor`] padrão se você estiver usando [ViT](model_doc/vit) para classificação de imagens: ```py >>> from transformers import ViTFeatureExtractor >>> vit_extractor = ViTFeatureExtractor() >>> print(vit_extractor) ViTFeatureExtractor { "do_normalize": true, "do_resize": true, "feature_extractor_type": "ViTFeatureExtractor", "image_mean": [ 0.5, 0.5, 0.5 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "size": 224 } ``` <Tip> Se você não estiver procurando por nenhuma customização, apenas use o método `from_pretrained` para carregar os parâmetros padrões do extrator de features do modelo. </Tip> Modifique qualquer um dos parâmetros do [`ViTFeatureExtractor`] para criar seu extrator de features customizado. 
```py >>> from transformers import ViTFeatureExtractor >>> my_vit_extractor = ViTFeatureExtractor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) >>> print(my_vit_extractor) ViTFeatureExtractor { "do_normalize": false, "do_resize": true, "feature_extractor_type": "ViTFeatureExtractor", "image_mean": [ 0.3, 0.3, 0.3 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": "PIL.Image.BOX", "size": 224 } ``` Para entradas de áudio, você pode criar um [`Wav2Vec2FeatureExtractor`] e customizar os parâmetros de uma forma similar: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor() >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": true, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 16000 } ``` ## Processor Para modelos que suportam tarefas multimodais, 🤗 Transformers oferece uma classe processadora que convenientemente cobre um extrator de features e tokenizer dentro de um único objeto. Por exemplo, vamos usar o [`Wav2Vec2Processor`] para uma tarefa de reconhecimento de fala automática (ASR). ASR transcreve áudio para texto, então você irá precisar de um extrator de features e um tokenizer. Crie um extrator de features para lidar com as entradas de áudio. 
```py >>> from transformers import Wav2Vec2FeatureExtractor >>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True) ``` Crie um tokenizer para lidar com a entrada de textos: ```py >>> from transformers import Wav2Vec2CTCTokenizer >>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt") ``` Combine o extrator de features e o tokenizer no [`Wav2Vec2Processor`]: ```py >>> from transformers import Wav2Vec2Processor >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) ``` Com duas classes básicas - configuração e modelo - e uma classe adicional de pré-processamento (tokenizer, extrator de features ou processador), você pode criar qualquer modelo suportado por 🤗 Transformers. Todas essas classes base são configuráveis, permitindo que você use os atributos específicos que quiser. Você pode facilmente preparar um modelo para treinamento ou modificar um modelo pré-treinado com poucas mudanças.
transformers/docs/source/pt/create_a_model.md/0
{ "file_path": "transformers/docs/source/pt/create_a_model.md", "repo_id": "transformers", "token_count": 6000 }
326
- sections: - local: index title: 🤗 Transformers title: Get started
transformers/docs/source/tr/_toctree.yml/0
{ "file_path": "transformers/docs/source/tr/_toctree.yml", "repo_id": "transformers", "token_count": 25 }
327
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 安装 为你正在使用的深度学习框架安装 🤗 Transformers、设置缓存,并选择性配置 🤗 Transformers 以离线运行。 🤗 Transformers 已在 Python 3.6+、PyTorch 1.1.0+、TensorFlow 2.0+ 以及 Flax 上进行测试。针对你使用的深度学习框架,请参照以下安装说明进行安装: * [PyTorch](https://pytorch.org/get-started/locally/) 安装说明。 * [TensorFlow 2.0](https://www.tensorflow.org/install/pip) 安装说明。 * [Flax](https://flax.readthedocs.io/en/latest/) 安装说明。 ## 使用 pip 安装 你应该使用 [虚拟环境](https://docs.python.org/3/library/venv.html) 安装 🤗 Transformers。如果你不熟悉 Python 虚拟环境,请查看此 [教程](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)。使用虚拟环境,你可以轻松管理不同项目,避免不同依赖项之间的兼容性问题。 首先,在项目目录中创建虚拟环境: ```bash python -m venv .env ``` 在 Linux 和 MacOs 系统中激活虚拟环境: ```bash source .env/bin/activate ``` 在 Windows 系统中激活虚拟环境: ```bash .env/Scripts/activate ``` 现在你可以使用以下命令安装 🤗 Transformers: ```bash pip install transformers ``` 若仅需 CPU 支持,可以使用单行命令方便地安装 🤗 Transformers 和深度学习库。例如,使用以下命令安装 🤗 Transformers 和 PyTorch: ```bash pip install 'transformers[torch]' ``` 🤗 Transformers 和 TensorFlow 2.0: ```bash pip install 'transformers[tf-cpu]' ``` <Tip warning={true}> M1 / ARM用户 在安装 TensorFlow 2.0 前,你需要安装以下库: ```bash brew install cmake brew install pkg-config ``` </Tip> 🤗 Transformers 和 Flax: ```bash pip install 'transformers[flax]' ``` 最后,运行以下命令以检查 🤗 
Transformers 是否已被正确安装。该命令将下载一个预训练模型: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` 然后打印标签以及分数: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## 源码安装 使用以下命令从源码安装 🤗 Transformers: ```bash pip install git+https://github.com/huggingface/transformers ``` 此命令下载的是最新的前沿 `main` 版本而不是最新的 `stable` 版本。`main` 版本适用于跟最新开发保持一致。例如,上次正式版发布带来的 bug 被修复了,但新版本尚未被推出。但是,这也说明 `main` 版本并不一定总是稳定的。我们努力保持 `main` 版本的可操作性,大多数问题通常在几个小时或一天以内就能被解决。如果你遇到问题,请提个 [Issue](https://github.com/huggingface/transformers/issues) 以便我们能更快修复。 运行以下命令以检查 🤗 Transformers 是否已被正确安装: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## 可编辑安装 如果你有下列需求,需要进行可编辑安装: * 使用源码的 `main` 版本。 * 为 🤗 Transformers 贡献代码,需要测试代码中的更改。 使用以下命令克隆仓库并安装 🤗 Transformers: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` 这些命令将会链接你克隆的仓库以及你的 Python 库路径。现在,Python 不仅会在正常的库路径中搜索库,也会在你克隆到的文件夹中进行查找。例如,如果你的 Python 包通常本应安装在 `~/anaconda3/envs/main/lib/python3.7/site-packages/` 目录中,在这种情况下 Python 也会搜索你克隆到的文件夹:`~/transformers/`。 <Tip warning={true}> 如果你想继续使用这个库,必须保留 `transformers` 文件夹。 </Tip> 现在,你可以使用以下命令,将你克隆的 🤗 Transformers 库轻松更新至最新版本: ```bash cd ~/transformers/ git pull ``` 你的 Python 环境将在下次运行时找到 `main` 版本的 🤗 Transformers。 ## 使用 conda 安装 从 conda 的 `conda-forge` 频道安装: ```bash conda install conda-forge::transformers ``` ## 缓存设置 预训练模型会被下载并本地缓存到 `~/.cache/huggingface/hub`。这是由环境变量 `TRANSFORMERS_CACHE` 指定的默认目录。在 Windows 上,默认目录为 `C:\Users\username\.cache\huggingface\hub`。你可以按照不同优先级改变下述环境变量,以指定不同的缓存目录。 1. 环境变量(默认): `HUGGINGFACE_HUB_CACHE` 或 `TRANSFORMERS_CACHE`。 2. 环境变量 `HF_HOME`。 3. 
环境变量 `XDG_CACHE_HOME` + `/huggingface`。 <Tip> 除非你明确指定了环境变量 `TRANSFORMERS_CACHE`,🤗 Transformers 将可能会使用较早版本设置的环境变量 `PYTORCH_TRANSFORMERS_CACHE` 或 `PYTORCH_PRETRAINED_BERT_CACHE`。 </Tip> ## 离线模式 🤗 Transformers 可以仅使用本地文件在防火墙或离线环境中运行。设置环境变量 `HF_HUB_OFFLINE=1` 以启用该行为。 <Tip> 通过设置环境变量 `HF_DATASETS_OFFLINE=1` 将 [🤗 Datasets](https://huggingface.co/docs/datasets/) 添加至你的离线训练工作流程中。 </Tip> 例如,你通常会使用以下命令对外部实例进行防火墙保护的的普通网络上运行程序: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` 在离线环境中运行相同的程序: ```bash HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` 现在脚本可以应该正常运行,而无需挂起或等待超时,因为它知道只应查找本地文件。 ### 获取离线时使用的模型和分词器 另一种离线时使用 🤗 Transformers 的方法是预先下载好文件,然后在需要离线使用时指向它们的离线路径。有三种实现的方法: * 单击 [Model Hub](https://huggingface.co/models) 用户界面上的 ↓ 图标下载文件。 ![下载图标](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * 使用 [`PreTrainedModel.from_pretrained`] 和 [`PreTrainedModel.save_pretrained`] 工作流程: 1. 预先使用 [`PreTrainedModel.from_pretrained`] 下载文件: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. 使用 [`PreTrainedModel.save_pretrained`] 将文件保存至指定目录: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. 现在,你可以在离线时从指定目录使用 [`PreTrainedModel.from_pretrained`] 重新加载你的文件: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * 使用代码用 [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) 库下载文件: 1. 
在你的虚拟环境中安装 `huggingface_hub` 库: ```bash python -m pip install huggingface_hub ``` 2. 使用 [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) 函数将文件下载到指定路径。例如,以下命令将 `config.json` 文件从 [T0](https://huggingface.co/bigscience/T0_3B) 模型下载至你想要的路径: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` 下载完文件并在本地缓存后,指定其本地路径以加载和使用该模型: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> 请参阅 [如何从 Hub 下载文件](https://huggingface.co/docs/hub/how-to-downstream) 部分,获取有关下载存储在 Hub 上文件的更多详细信息。 </Tip>
transformers/docs/source/zh/installation.md/0
{ "file_path": "transformers/docs/source/zh/installation.md", "repo_id": "transformers", "token_count": 4837 }
328
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 使用 🤗 PEFT 加载adapters [[open-in-colab]] [参数高效微调(PEFT)方法](https://huggingface.co/blog/peft)在微调过程中冻结预训练模型的参数,并在其顶部添加少量可训练参数(adapters)。adapters被训练以学习特定任务的信息。这种方法已被证明非常节省内存,同时具有较低的计算使用量,同时产生与完全微调模型相当的结果。 使用PEFT训练的adapters通常比完整模型小一个数量级,使其方便共享、存储和加载。 <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/> <figcaption class="text-center">与完整尺寸的模型权重(约为700MB)相比,存储在Hub上的OPTForCausalLM模型的adapter权重仅为~6MB。</figcaption> </div> 如果您对学习更多关于🤗 PEFT库感兴趣,请查看[文档](https://huggingface.co/docs/peft/index)。 ## 设置 首先安装 🤗 PEFT: ```bash pip install peft ``` 如果你想尝试全新的特性,你可能会有兴趣从源代码安装这个库: ```bash pip install git+https://github.com/huggingface/peft.git ``` ## 支持的 PEFT 模型 Transformers原生支持一些PEFT方法,这意味着你可以加载本地存储或在Hub上的adapter权重,并使用几行代码轻松运行或训练它们。以下是受支持的方法: - [Low Rank Adapters](https://huggingface.co/docs/peft/conceptual_guides/lora) - [IA3](https://huggingface.co/docs/peft/conceptual_guides/ia3) - [AdaLoRA](https://arxiv.org/abs/2303.10512) 如果你想使用其他PEFT方法,例如提示学习或提示微调,或者关于通用的 🤗 PEFT库,请参阅[文档](https://huggingface.co/docs/peft/index)。 ## 加载 PEFT adapter 
要从huggingface的Transformers库中加载并使用PEFTadapter模型,请确保Hub仓库或本地目录包含一个`adapter_config.json`文件和adapter权重,如上例所示。然后,您可以使用`AutoModelFor`类加载PEFT adapter模型。例如,要为因果语言建模加载一个PEFT adapter模型: 1. 指定PEFT模型id 2. 将其传递给[`AutoModelForCausalLM`]类 ```py from transformers import AutoModelForCausalLM, AutoTokenizer peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(peft_model_id) ``` <Tip> 你可以使用`AutoModelFor`类或基础模型类(如`OPTForCausalLM`或`LlamaForCausalLM`)来加载一个PEFT adapter。 </Tip> 您也可以通过`load_adapter`方法来加载 PEFT adapter。 ```py from transformers import AutoModelForCausalLM, AutoTokenizer model_id = "facebook/opt-350m" peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(model_id) model.load_adapter(peft_model_id) ``` ## 基于8bit或4bit进行加载 `bitsandbytes`集成支持8bit和4bit精度数据类型,这对于加载大模型非常有用,因为它可以节省内存(请参阅`bitsandbytes`[指南](./quantization#bitsandbytes-integration)以了解更多信息)。要有效地将模型分配到您的硬件,请在[`~PreTrainedModel.from_pretrained`]中添加`load_in_8bit`或`load_in_4bit`参数,并将`device_map="auto"`设置为: ```py from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig peft_model_id = "ybelkada/opt-350m-lora" model = AutoModelForCausalLM.from_pretrained(peft_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) ``` ## 添加新的adapter 你可以使用[`~peft.PeftModel.add_adapter`]方法为一个已有adapter的模型添加一个新的adapter,只要新adapter的类型与当前adapter相同即可。例如,如果你有一个附加到模型上的LoRA adapter: ```py from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer from peft import LoraConfig model_id = "facebook/opt-350m" model = AutoModelForCausalLM.from_pretrained(model_id) lora_config = LoraConfig( target_modules=["q_proj", "k_proj"], init_lora_weights=False ) model.add_adapter(lora_config, adapter_name="adapter_1") ``` 添加一个新的adapter: ```py # attach new adapter with same config model.add_adapter(lora_config, adapter_name="adapter_2") ``` 现在您可以使用[`~peft.PeftModel.set_adapter`]来设置要使用的adapter。 ```py # use adapter_1 model.set_adapter("adapter_1") output = 
model.generate(**inputs) print(tokenizer.decode(output[0], skip_special_tokens=True)) # use adapter_2 model.set_adapter("adapter_2") output_enabled = model.generate(**inputs) print(tokenizer.decode(output_enabled[0], skip_special_tokens=True)) ``` ## 启用和禁用adapters 一旦您将adapter添加到模型中,您可以启用或禁用adapter模块。要启用adapter模块: ```py from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer from peft import PeftConfig model_id = "facebook/opt-350m" adapter_model_id = "ybelkada/opt-350m-lora" tokenizer = AutoTokenizer.from_pretrained(model_id) text = "Hello" inputs = tokenizer(text, return_tensors="pt") model = AutoModelForCausalLM.from_pretrained(model_id) peft_config = PeftConfig.from_pretrained(adapter_model_id) # to initiate with random weights peft_config.init_lora_weights = False model.add_adapter(peft_config) model.enable_adapters() output = model.generate(**inputs) ``` 要禁用adapter模块: ```py model.disable_adapters() output = model.generate(**inputs) ``` ## 训练一个 PEFT adapter PEFT适配器受[`Trainer`]类支持,因此您可以为您的特定用例训练适配器。它只需要添加几行代码即可。例如,要训练一个LoRA adapter: <Tip> 如果你不熟悉如何使用[`Trainer`]微调模型,请查看[微调预训练模型](training)教程。 </Tip> 1. 使用任务类型和超参数定义adapter配置(参见[`~peft.LoraConfig`]以了解超参数的详细信息)。 ```py from peft import LoraConfig peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=64, bias="none", task_type="CAUSAL_LM", ) ``` 2. 将adapter添加到模型中。 ```py model.add_adapter(peft_config) ``` 3. 现在可以将模型传递给[`Trainer`]了! ```py trainer = Trainer(model=model, ...) trainer.train() ``` 要保存训练好的adapter并重新加载它: ```py model.save_pretrained(save_dir) model = AutoModelForCausalLM.from_pretrained(save_dir) ``` <!-- TODO: (@younesbelkada @stevhliu) - Link to PEFT docs for further details - Trainer - 8-bit / 4-bit examples ? -->
transformers/docs/source/zh/peft.md/0
{ "file_path": "transformers/docs/source/zh/peft.md", "repo_id": "transformers", "token_count": 3638 }
329
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Language model training examples The following example showcases how to train a language model from scratch using the JAX/Flax backend. JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. Models written in JAX/Flax are **immutable** and updated in a purely functional way which enables simple and efficient model parallelism. ## Masked language modeling In the following, we demonstrate how to train a bi-directional transformer model using masked language modeling objective as introduced in [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805). More specifically, we demonstrate how JAX/Flax can be leveraged to pre-train [**`FacebookAI/roberta-base`**](https://huggingface.co/FacebookAI/roberta-base) in Norwegian on a single TPUv3-8 pod. The example script uses the 🤗 Datasets library. You can easily customize them to your needs if you need extra processing on your datasets. To setup all relevant files for training, let's create a directory. ```bash mkdir ./norwegian-roberta-base ``` ### Train tokenizer In the first step, we train a tokenizer to efficiently process the text input for the model. 
Similar to how it is shown in [How to train a new language model from scratch using Transformers and Tokenizers](https://huggingface.co/blog/how-to-train), we use a **`ByteLevelBPETokenizer`**. The tokenizer is trained on the complete Norwegian dataset of OSCAR and consequently saved in the cloned model directory. This can take up to 10 minutes depending on your hardware ☕. ```python from datasets import load_dataset from tokenizers import trainers, Tokenizer, normalizers, ByteLevelBPETokenizer # load dataset dataset = load_dataset("oscar", "unshuffled_deduplicated_no", split="train") # Instantiate tokenizer tokenizer = ByteLevelBPETokenizer() def batch_iterator(batch_size=1000): for i in range(0, len(dataset), batch_size): yield dataset[i: i + batch_size]["text"] # Customized training tokenizer.train_from_iterator(batch_iterator(), vocab_size=50265, min_frequency=2, special_tokens=[ "<s>", "<pad>", "</s>", "<unk>", "<mask>", ]) # Save files to disk tokenizer.save("./norwegian-roberta-base/tokenizer.json") ``` ### Create configuration Next, we create the model's configuration file. This is as simple as loading and storing [`**FacebookAI/roberta-base**`](https://huggingface.co/FacebookAI/roberta-base) in the local model folder: ```python from transformers import RobertaConfig config = RobertaConfig.from_pretrained("FacebookAI/roberta-base", vocab_size=50265) config.save_pretrained("./norwegian-roberta-base") ``` Great, we have set up our model repository. During training, we will automatically push the training logs and model weights to the repo. 
### Train model Next we can run the example script to pretrain the model: ```bash python run_mlm_flax.py \ --output_dir="./norwegian-roberta-base" \ --model_type="roberta" \ --config_name="./norwegian-roberta-base" \ --tokenizer_name="./norwegian-roberta-base" \ --dataset_name="oscar" \ --dataset_config_name="unshuffled_deduplicated_no" \ --max_seq_length="128" \ --weight_decay="0.01" \ --per_device_train_batch_size="128" \ --per_device_eval_batch_size="128" \ --learning_rate="3e-4" \ --warmup_steps="1000" \ --overwrite_output_dir \ --num_train_epochs="18" \ --adam_beta1="0.9" \ --adam_beta2="0.98" \ --logging_steps="500" \ --save_steps="2500" \ --eval_steps="2500" \ --push_to_hub ``` Training should converge at a loss and accuracy of 1.78 and 0.64 respectively after 18 epochs on a single TPUv3-8. This should take less than 18 hours. Training statistics can be accessed on [tfhub.dev](https://tensorboard.dev/experiment/GdYmdak2TWeVz0DDRYOrrg). For a step-by-step walkthrough of how to do masked language modeling in Flax, please have a look at [this](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb) google colab. ## Causal language modeling In the following, we demonstrate how to train an auto-regressive causal transformer model in JAX/Flax. More specifically, we pretrain a randomly initialized [**`openai-community/gpt2`**](https://huggingface.co/openai-community/gpt2) model in Norwegian on a single TPUv3-8. to pre-train 124M [**`openai-community/gpt2`**](https://huggingface.co/openai-community/gpt2) in Norwegian on a single TPUv3-8 pod. The example script uses the 🤗 Datasets library. You can easily customize them to your needs if you need extra processing on your datasets. To setup all relevant files for training, let's create a directory. ```bash mkdir ./norwegian-gpt2 ``` ### Train tokenizer In the first step, we train a tokenizer to efficiently process the text input for the model. 
Similar to how it is shown in [How to train a new language model from scratch using Transformers and Tokenizers](https://huggingface.co/blog/how-to-train), we use a **`ByteLevelBPETokenizer`**. The tokenizer is trained on the complete Norwegian dataset of OSCAR and consequently saved in the cloned model directory. This can take up to 10 minutes depending on your hardware ☕. ```python from datasets import load_dataset from tokenizers import trainers, Tokenizer, normalizers, ByteLevelBPETokenizer # load dataset dataset = load_dataset("oscar", "unshuffled_deduplicated_no", split="train") # Instantiate tokenizer tokenizer = ByteLevelBPETokenizer() def batch_iterator(batch_size=1000): for i in range(0, len(dataset), batch_size): yield dataset[i: i + batch_size]["text"] # Customized training tokenizer.train_from_iterator(batch_iterator(), vocab_size=50257, min_frequency=2, special_tokens=[ "<s>", "<pad>", "</s>", "<unk>", "<mask>", ]) # Save files to disk tokenizer.save("./norwegian-gpt2/tokenizer.json") ``` ### Create configuration Next, we create the model's configuration file. This is as simple as loading and storing [`**openai-community/gpt2**`](https://huggingface.co/openai-community/gpt2) in the local model folder: ```python from transformers import GPT2Config config = GPT2Config.from_pretrained("openai-community/gpt2", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, vocab_size=50257) config.save_pretrained("./norwegian-gpt2") ``` Great, we have set up our model repository. During training, we will now automatically push the training logs and model weights to the repo. 
### Train model Finally, we can run the example script to pretrain the model: ```bash python run_clm_flax.py \ --output_dir="./norwegian-gpt2" \ --model_type="gpt2" \ --config_name="./norwegian-gpt2" \ --tokenizer_name="./norwegian-gpt2" \ --dataset_name="oscar" \ --dataset_config_name="unshuffled_deduplicated_no" \ --do_train --do_eval \ --block_size="512" \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="64" \ --learning_rate="5e-3" --warmup_steps="1000" \ --adam_beta1="0.9" --adam_beta2="0.98" --weight_decay="0.01" \ --overwrite_output_dir \ --num_train_epochs="20" \ --logging_steps="500" \ --save_steps="2500" \ --eval_steps="2500" \ --push_to_hub ``` Training should converge at a loss and perplexity of 3.24 and 25.72 respectively after 20 epochs on a single TPUv3-8. This should take less than ~21 hours. Training statistics can be accessed on [tfhub.dev](https://tensorboard.dev/experiment/2zEhLwJ0Qp2FAkI3WVH9qA). For a step-by-step walkthrough of how to do causal language modeling in Flax, please have a look at [this](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/causal_language_modeling_flax.ipynb) google colab. ## T5-like span-masked language modeling In the following, we demonstrate how to train a T5 model using the span-masked language model objective as proposed in the [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683). More specifically, we demonstrate how JAX/Flax can be leveraged to pre-train [**`google/t5-v1_1-base`**](https://huggingface.co/google/t5-v1_1-base) in Norwegian on a single TPUv3-8 pod. The example script uses the 🤗 Datasets library. You can easily customize them to your needs if you need extra processing on your datasets. Let's start by creating a model repository to save the trained model and logs. Here we call the model `"norwegian-t5-base"`, but you can change the model name as you like. 
To setup all relevant files for training, let's create a directory. ```bash mkdir ./norwegian-t5-base ``` ### Train tokenizer In the first step, we train a tokenizer to efficiently process the text input for the model. We make use of the [tokenizers](https://github.com/huggingface/tokenizers) library to train a sentencepiece unigram tokenizer as shown in [t5_tokenizer_model.py](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling/t5_tokenizer_model.py) which is heavily inspired from [yandex-research/DeDLOC's tokenizer model](https://github.com/yandex-research/DeDLOC/blob/5c994bc64e573702a9a79add3ecd68b38f14b548/sahajbert/tokenizer/tokenizer_model.py) . The tokenizer is trained on the complete Norwegian dataset of OSCAR and consequently saved in the cloned model directory. This can take up to 120 minutes depending on your hardware ☕☕☕ . ```python import datasets from t5_tokenizer_model import SentencePieceUnigramTokenizer vocab_size = 32_000 input_sentence_size = None # Initialize a dataset dataset = datasets.load_dataset("oscar", name="unshuffled_deduplicated_no", split="train") tokenizer = SentencePieceUnigramTokenizer(unk_token="<unk>", eos_token="</s>", pad_token="<pad>") # Build an iterator over this dataset def batch_iterator(input_sentence_size=None): if input_sentence_size is None: input_sentence_size = len(dataset) batch_length = 100 for i in range(0, input_sentence_size, batch_length): yield dataset[i: i + batch_length]["text"] # Train tokenizer tokenizer.train_from_iterator( iterator=batch_iterator(input_sentence_size=input_sentence_size), vocab_size=vocab_size, show_progress=True, ) # Save files to disk tokenizer.save("./norwegian-t5-base/tokenizer.json") ``` ### Create configuration Next, we create the model's configuration file. 
This is as simple as loading and storing [`**google/t5-v1_1-base**`](https://huggingface.co/google/t5-v1_1-base) in the local model folder: ```python from transformers import T5Config config = T5Config.from_pretrained("google/t5-v1_1-base", vocab_size=tokenizer.get_vocab_size()) config.save_pretrained("./norwegian-t5-base") ``` Great, we have set up our model repository. During training, we will automatically push the training logs and model weights to the repo. ### Train model Next we can run the example script to pretrain the model: ```bash python run_t5_mlm_flax.py \ --output_dir="./norwegian-t5-base" \ --model_type="t5" \ --config_name="./norwegian-t5-base" \ --tokenizer_name="./norwegian-t5-base" \ --dataset_name="oscar" \ --dataset_config_name="unshuffled_deduplicated_no" \ --max_seq_length="512" \ --per_device_train_batch_size="32" \ --per_device_eval_batch_size="32" \ --adafactor \ --learning_rate="0.005" \ --weight_decay="0.001" \ --warmup_steps="2000" \ --overwrite_output_dir \ --logging_steps="500" \ --save_steps="10000" \ --eval_steps="2500" \ --push_to_hub ``` Training should converge at a loss and accuracy of 2.36 and 57.0 respectively after 3 epochs on a single TPUv3-8. This should take around 4.5 hours. Training statistics can be accessed on directly on the 🤗 [hub](https://huggingface.co/patrickvonplaten/t5-base-norwegian/tensorboard) ## BART: Denoising language modeling In the following, we demonstrate how to train a BART model using denoising language modeling objective as introduced in [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461). More specifically, we demonstrate how JAX/Flax can be leveraged to pre-train [**`bart-base`**](https://huggingface.co/facebook/bart-base) in Norwegian on a single TPUv3-8 pod. The example script uses the 🤗 Datasets library. You can easily customize them to your needs if you need extra processing on your datasets. 
To setup all relevant files for training, let's create a directory. ```bash mkdir ./norwegian-bart-base ``` ### Train tokenizer In the first step, we train a tokenizer to efficiently process the text input for the model. Similar to how it is shown in [How to train a new language model from scratch using Transformers and Tokenizers](https://huggingface.co/blog/how-to-train), we use a **`ByteLevelBPETokenizer`**. The tokenizer is trained on the complete Norwegian dataset of OSCAR and consequently saved in the cloned model directory. This can take up to 10 minutes depending on your hardware ☕. ```python from datasets import load_dataset from tokenizers import trainers, Tokenizer, normalizers, ByteLevelBPETokenizer # load dataset dataset = load_dataset("oscar", "unshuffled_deduplicated_no", split="train") # Instantiate tokenizer tokenizer = ByteLevelBPETokenizer() def batch_iterator(batch_size=1000): for i in range(0, len(dataset), batch_size): yield dataset[i: i + batch_size]["text"] # Customized training tokenizer.train_from_iterator(batch_iterator(), vocab_size=50265, min_frequency=2, special_tokens=[ "<s>", "<pad>", "</s>", "<unk>", "<mask>", ]) # Save files to disk tokenizer.save("./norwegian-bart-base/tokenizer.json") ``` ### Create configuration Next, we create the model's configuration file. This is as simple as loading and storing [`**facebook/bart-base**`](https://huggingface.co/facebook/bart-base) in the local model folder: ```python from transformers import BartConfig config = BartConfig.from_pretrained("facebook/bart-base", vocab_size=50265) config.save_pretrained("./norwegian-bart-base") ``` Great, we have set up our model repository. During training, we will automatically push the training logs and model weights to the repo. 
### Train model

Next we can run the example script to pretrain the model:

```bash
python run_bart_dlm_flax.py \
	--output_dir="./norwegian-bart-base" \
	--config_name="./norwegian-bart-base" \
	--tokenizer_name="./norwegian-bart-base" \
	--dataset_name="oscar" \
	--dataset_config_name="unshuffled_deduplicated_no" \
	--max_seq_length="1024" \
	--per_device_train_batch_size="32" \
	--per_device_eval_batch_size="32" \
	--learning_rate="1e-4" \
	--warmup_steps="2000" \
	--overwrite_output_dir \
	--logging_steps="500" \
	--save_steps="2000" \
	--eval_steps="2000" \
	--push_to_hub
```

Training should converge at a loss and accuracy of 1.36 and 0.77 respectively after 3 epochs on a single TPUv3-8. This should take less than 6 hours.
Training statistics can be accessed on [tensorboard.dev](https://tensorboard.dev/experiment/Maw62QlaSXWS0MOf2V2lbg/).

## Runtime evaluation

We also ran masked language modeling using PyTorch/XLA on a TPUv3-8, and PyTorch on 8 V100 GPUs. We report the
overall training time below.
For reproducibility, we state the training commands used for PyTorch/XLA and PyTorch further below.

| Task  | [TPU v3-8 (Flax)](https://tensorboard.dev/experiment/GdYmdak2TWeVz0DDRYOrrg/)  | [TPU v3-8 (Pytorch/XLA)](https://tensorboard.dev/experiment/7Jq1kcQQRAmy12KOdXek7A/)| [8 GPU (PyTorch)](https://tensorboard.dev/experiment/PJneV8FQRxa2unPw1QnVHA)  |
|-------|-----------|------------|------------|
| MLM   |  15h32m   |  23h46m    | 44h14m     |

*All experiments are run on Google Cloud Platform. GPU experiments are run without further optimizations besides JAX
transformations. GPU experiments are run with full precision (fp32). "TPU v3-8" are 8 TPU cores on 4 chips (each chip has 2 cores), while "8 GPU" are 8 GPU chips.

### Script to run MLM with PyTorch/XLA on TPUv3-8

For comparison one can run the same pre-training with PyTorch/XLA on TPU. To set up PyTorch/XLA on Cloud TPU VMs, please
refer to [this](https://cloud.google.com/tpu/docs/pytorch-xla-ug-tpu-vm) guide.
Having created the tokenizer and configuration in `norwegian-roberta-base`, we create the following symbolic links: ```bash ln -s ~/transformers/examples/pytorch/language-modeling/run_mlm.py ./ ln -s ~/transformers/examples/pytorch/xla_spawn.py ./ ``` , set the following environment variables: ```bash export XRT_TPU_CONFIG="localservice;0;localhost:51011" unset LD_PRELOAD export NUM_TPUS=8 export TOKENIZERS_PARALLELISM=0 export MODEL_DIR="./norwegian-roberta-base" mkdir -p ${MODEL_DIR} ``` , and start training as follows: ```bash python3 xla_spawn.py --num_cores ${NUM_TPUS} run_mlm.py --output_dir="./runs" \ --model_type="roberta" \ --config_name="${MODEL_DIR}" \ --tokenizer_name="${MODEL_DIR}" \ --dataset_name="oscar" \ --dataset_config_name="unshuffled_deduplicated_no" \ --max_seq_length="128" \ --weight_decay="0.01" \ --per_device_train_batch_size="128" \ --per_device_eval_batch_size="128" \ --learning_rate="3e-4" \ --warmup_steps="1000" \ --overwrite_output_dir \ --num_train_epochs="18" \ --adam_beta1="0.9" \ --adam_beta2="0.98" \ --do_train \ --do_eval \ --logging_steps="500" \ --eval_strategy="epoch" \ --report_to="tensorboard" \ --save_strategy="no" ``` ### Script to compare pre-training with PyTorch on 8 GPU V100's For comparison you can run the same pre-training with PyTorch on GPU. Note that we have to make use of `gradient_accumulation` because the maximum batch size that fits on a single V100 GPU is 32 instead of 128. 
Having created the tokenizer and configuration in `norwegian-roberta-base`, we create the following symbolic links:

```bash
ln -s ~/transformers/examples/pytorch/language-modeling/run_mlm.py ./
```

, set some environment variables:

```bash
export NUM_GPUS=8
export TOKENIZERS_PARALLELISM=0
export MODEL_DIR="./norwegian-roberta-base"
mkdir -p ${MODEL_DIR}
```

, and can start training as follows:

```bash
python3 -m torch.distributed.launch --nproc_per_node ${NUM_GPUS} run_mlm.py \
    --output_dir="${MODEL_DIR}" \
    --model_type="roberta" \
    --config_name="${MODEL_DIR}" \
    --tokenizer_name="${MODEL_DIR}" \
    --dataset_name="oscar" \
    --dataset_config_name="unshuffled_deduplicated_no" \
    --max_seq_length="128" \
    --weight_decay="0.01" \
    --per_device_train_batch_size="32" \
    --per_device_eval_batch_size="32" \
    --gradient_accumulation_steps="4" \
    --learning_rate="3e-4" \
    --warmup_steps="1000" \
    --overwrite_output_dir \
    --num_train_epochs="18" \
    --adam_beta1="0.9" \
    --adam_beta2="0.98" \
    --do_train \
    --do_eval \
    --logging_steps="500" \
    --eval_strategy="steps" \
    --report_to="tensorboard" \
    --save_strategy="no"
```
transformers/examples/flax/language-modeling/README.md/0
{ "file_path": "transformers/examples/flax/language-modeling/README.md", "repo_id": "transformers", "token_count": 6844 }
330
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for summarization. """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. import json import logging import math import os import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from functools import partial from pathlib import Path from typing import Callable, Optional import datasets import evaluate import jax import jax.numpy as jnp import nltk # Here to have a nice missing dependency error message early on import numpy as np import optax from datasets import Dataset, load_dataset from filelock import FileLock from flax import jax_utils, traverse_util from flax.jax_utils import pad_shard_unpad, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import HfApi from tqdm import tqdm import transformers from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, AutoConfig, AutoTokenizer, FlaxAutoModelForSeq2SeqLM, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import is_offline_mode, send_example_telemetry logger = logging.getLogger(__name__) try: nltk.data.find("tokenizers/punkt") except (LookupError, OSError): if is_offline_mode(): raise LookupError( 
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" ) with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." ) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) label_smoothing_factor: float = field( default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."} ) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) num_train_epochs: float = field(default=3.0, 
metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) gradient_checkpointing: bool = field( default=False, metadata={ "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." }, ) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." 
) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) text_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."}, ) summary_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."}, ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input predict data file to do prediction on (a text file)."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " "This argument is also used to override the `max_length` param of `model.generate`, which is used " "during evaluation." 
) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) source_prefix: Optional[str] = field( default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."} ) predict_with_generate: bool = field( default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) num_beams: Optional[int] = field( default=1, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to `model.generate`, " "which is used during evaluation." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __post_init__(self): if ( self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None ): raise ValueError("Need either a dataset name or a training, validation, or test file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
            if self.test_file is not None:
                extension = self.test_file.split(".")[-1]
                assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
        if self.val_max_target_length is None:
            # Fall back to the training target length when no separate validation length was given.
            self.val_max_target_length = self.max_target_length


# Mapping from dataset name to the (input text column, target summary column) pair,
# used when --text_column / --summary_column are not passed on the command line.
summarization_name_mapping = {
    "amazon_reviews_multi": ("review_body", "review_title"),
    "big_patent": ("description", "abstract"),
    "cnn_dailymail": ("article", "highlights"),
    "orange_sum": ("text", "summary"),
    "pn_summary": ("article", "summary"),
    "psc": ("extract_text", "summary_text"),
    "samsum": ("dialogue", "summary"),
    "thaisum": ("body", "summary"),
    "xglue": ("news_body", "news_title"),
    "xsum": ("document", "summary"),
    "wiki_summary": ("article", "highlights"),
}


class TrainState(train_state.TrainState):
    # Flax train state extended with the dropout PRNG key, so the key travels with
    # the rest of the state when it is replicated across devices.
    dropout_rng: jnp.ndarray

    def replicate(self):
        # Replicate the whole state on every device; each device receives its own
        # shard of the dropout key so dropout masks differ per device.
        return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))


def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False, drop_last=True):
    """
    Returns batches of size `batch_size` from `dataset`. If `drop_last` is set to `False`, the final batch may be
    incomplete, and range in size from 1 to `batch_size`. Shuffle batches if `shuffle` is `True`.
    """
    if shuffle:
        batch_idx = jax.random.permutation(rng, len(dataset))
        batch_idx = np.asarray(batch_idx)
    else:
        batch_idx = np.arange(len(dataset))

    if drop_last:
        steps_per_epoch = len(dataset) // batch_size
        batch_idx = batch_idx[: steps_per_epoch * batch_size]  # Skip incomplete batch.
        batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
    else:
        steps_per_epoch = math.ceil(len(dataset) / batch_size)
        batch_idx = np.array_split(batch_idx, steps_per_epoch)

    for idx in batch_idx:
        batch = dataset[idx]
        # Convert every column to a plain numpy array so the batch can be sharded across devices.
        batch = {k: np.array(v) for k, v in batch.items()}

        yield batch


def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step):
    """Write one logging window of train/eval metrics to TensorBoard.

    `train_metrics` is a list of per-step metric dicts accumulated since the last
    flush; each value is back-filled onto the step it was produced at, with the
    window ending at `step`. `eval_metrics` holds scalars logged at `step` itself.
    """
    summary_writer.scalar("train_time", train_time, step)

    # Stack the list of per-step dicts into {name: [values...]} arrays.
    train_metrics = get_metrics(train_metrics)
    for key, vals in train_metrics.items():
        tag = f"train_{key}"
        for i, val in enumerate(vals):
            # The i-th value in the window belongs to step (step - len(vals) + i + 1).
            summary_writer.scalar(tag, val, step - len(vals) + i + 1)

    for metric_name, value in eval_metrics.items():
        summary_writer.scalar(f"eval_{metric_name}", value, step)


def create_learning_rate_fn(
    train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.ndarray]:
    """Returns a linear warmup, linear_decay learning rate function."""
    steps_per_epoch = train_ds_size // train_batch_size
    num_train_steps = steps_per_epoch * num_train_epochs
    # Ramp from 0 up to `learning_rate` over the warmup steps, then decay linearly back to 0
    # over the remaining training steps; the two schedules are joined at `num_warmup_steps`.
    warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
    decay_fn = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
    )
    schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
    return schedule_fn


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_summarization", model_args, data_args, framework="flax") if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. 
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files this script will use the first column for the full texts and the second column for the # summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments). # if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] dataset = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer if model_args.config_name: config = AutoConfig.from_pretrained( model_args.config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: 
raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = FlaxAutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: model = FlaxAutoModelForSeq2SeqLM.from_config( config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), trust_remote_code=model_args.trust_remote_code, ) if training_args.gradient_checkpointing: model.enable_gradient_checkpointing() if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") prefix = data_args.source_prefix if data_args.source_prefix is not None else "" # Preprocessing the datasets. # We need to tokenize inputs and targets. if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") column_names = dataset["train"].column_names elif training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") column_names = dataset["validation"].column_names elif training_args.do_predict: if "test" not in dataset: raise ValueError("--do_predict requires a test dataset") column_names = dataset["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # Get the column names for input/target. 
dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None) if data_args.text_column is None: text_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: text_column = data_args.text_column if text_column not in column_names: raise ValueError( f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.summary_column is None: summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: summary_column = data_args.summary_column if summary_column not in column_names: raise ValueError( f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}" ) # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length # In Flax, for seq2seq models we need to pass `decoder_input_ids` # as the Flax models don't accept `labels`, we need to prepare the decoder_input_ids here # for that dynamically import the `shift_tokens_right` function from the model file model_module = __import__(model.__module__, fromlist=["shift_tokens_tight"]) shift_tokens_right_fn = getattr(model_module, "shift_tokens_right") # Setting padding="max_length" as we need fixed length inputs for jitted functions def preprocess_function(examples): inputs = examples[text_column] targets = examples[summary_column] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer( inputs, max_length=data_args.max_source_length, padding="max_length", truncation=True, return_tensors="np" ) # Setup the tokenizer for targets labels = tokenizer( text_target=targets, max_length=max_target_length, padding="max_length", truncation=True, return_tensors="np", ) model_inputs["labels"] = labels["input_ids"] decoder_input_ids = shift_tokens_right_fn( labels["input_ids"], config.pad_token_id, config.decoder_start_token_id ) model_inputs["decoder_input_ids"] = np.asarray(decoder_input_ids) # We need decoder_attention_mask 
so we can ignore pad tokens from loss model_inputs["decoder_attention_mask"] = labels["attention_mask"] return model_inputs if training_args.do_train: train_dataset = dataset["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if training_args.do_eval: max_target_length = data_args.val_max_target_length eval_dataset = dataset["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if training_args.do_predict: max_target_length = data_args.val_max_target_length predict_dataset = dataset["test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) predict_dataset = predict_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) # Metric metric = evaluate.load("rouge", cache_dir=model_args.cache_dir) def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [label.strip() for label in labels] # rougeLSum expects newline after each sentence preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in 
preds] labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels] return preds, labels def compute_metrics(preds, labels): decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Some simple post-processing decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) result = {k: round(v * 100, 4) for k, v in result.items()} prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] result["gen_len"] = np.mean(prediction_lens) return result # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." 
) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) rng, dropout_rng = jax.random.split(rng) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() steps_per_epoch = len(train_dataset) // train_batch_size total_train_steps = steps_per_epoch * num_epochs # Create learning rate schedule linear_decay_lr_schedule_fn = create_learning_rate_fn( len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = { layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() } flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng) # label smoothed cross entropy def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0): """ The label smoothing implementation is adapted from 
Flax's official example: https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104 """ vocab_size = logits.shape[-1] confidence = 1.0 - label_smoothing_factor low_confidence = (1.0 - confidence) / (vocab_size - 1) normalizing_constant = -( confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20) ) soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence) loss = optax.softmax_cross_entropy(logits, soft_labels) loss = loss - normalizing_constant # ignore padded tokens from loss loss = loss * padding_mask loss = loss.sum() num_labels = padding_mask.sum() return loss, num_labels # Define gradient update step fn def train_step(state, batch, label_smoothing_factor=0.0): dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng) def compute_loss(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) return loss, num_labels grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics # Define eval fn def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], 
label_smoothing_factor) num_labels = jax.lax.psum(num_labels, "batch") # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {"loss": loss} return metrics # Define generation function max_length = ( data_args.val_max_target_length if data_args.val_max_target_length is not None else model.config.max_length ) num_beams = data_args.num_beams if data_args.num_beams is not None else model.config.num_beams gen_kwargs = {"max_length": max_length, "num_beams": num_beams} def generate_step(params, batch): model.params = params output_ids = model.generate(batch["input_ids"], attention_mask=batch["attention_mask"], **gen_kwargs) return output_ids.sequences # Create parallel version of the train and eval step p_train_step = jax.pmap( partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch", donate_argnums=(0,) ) p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch") p_generate_step = jax.pmap(generate_step, "batch") # Replicate the train state on each device state = state.replicate() logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {num_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}") logger.info(f" Total optimization steps = {total_train_steps}") train_time = 0 epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_start = time.time() # Create sampling rng rng, input_rng = jax.random.split(rng) train_metrics = [] # Generate an epoch by shuffling sampling indices from the train dataset train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True) steps_per_epoch = len(train_dataset) // train_batch_size # train for _ in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False): batch = next(train_loader) batch = shard(batch) state, train_metric = p_train_step(state, batch) train_metrics.append(train_metric) train_time += time.time() - train_start train_metric = unreplicate(train_metric) epochs.write( f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) # ======================== Evaluating ============================== eval_metrics = [] eval_preds = [] eval_labels = [] eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size, drop_last=False) eval_steps = math.ceil(len(eval_dataset) / eval_batch_size) for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False): # Model forward batch = next(eval_loader) labels = batch["labels"] metrics = pad_shard_unpad(p_eval_step, static_return=True)( state.params, batch, min_device_batch=per_device_eval_batch_size ) eval_metrics.append(metrics) # generation if data_args.predict_with_generate: generated_ids = pad_shard_unpad(p_generate_step)(state.params, batch) eval_preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"]))) eval_labels.extend(labels) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # compute ROUGE metrics rouge_desc = "" if data_args.predict_with_generate: rouge_metrics = compute_metrics(eval_preds, eval_labels) eval_metrics.update(rouge_metrics) rouge_desc = " 
".join([f"Eval {key}: {value} |" for key, value in rouge_metrics.items()]) # Print metrics and update progress bar desc = f"Epoch... ({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']} | {rouge_desc})" epochs.write(desc) epochs.desc = desc # Save metrics if has_tensorboard and jax.process_index() == 0: cur_step = epoch * (len(train_dataset) // train_batch_size) write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step) # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: api.upload_folder( commit_message=f"Saving weights and logs of epoch {epoch}", folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) # ======================== Prediction loop ============================== if training_args.do_predict: logger.info("*** Predict ***") pred_metrics = [] pred_generations = [] pred_labels = [] pred_loader = data_loader(input_rng, predict_dataset, eval_batch_size, drop_last=False) pred_steps = math.ceil(len(predict_dataset) / eval_batch_size) for _ in tqdm(range(pred_steps), desc="Predicting...", position=2, leave=False): # Model forward batch = next(pred_loader) labels = batch["labels"] metrics = pad_shard_unpad(p_eval_step, static_return=True)( state.params, batch, min_device_batch=per_device_eval_batch_size ) pred_metrics.append(metrics) # generation if data_args.predict_with_generate: generated_ids = pad_shard_unpad(p_generate_step)(state.params, batch) pred_generations.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"]))) pred_labels.extend(labels) # normalize prediction metrics pred_metrics = get_metrics(pred_metrics) pred_metrics = jax.tree_util.tree_map(jnp.mean, pred_metrics) # compute ROUGE 
metrics rouge_desc = "" if data_args.predict_with_generate: rouge_metrics = compute_metrics(pred_generations, pred_labels) pred_metrics.update(rouge_metrics) rouge_desc = " ".join([f"Predict {key}: {value} |" for key, value in rouge_metrics.items()]) # Print metrics desc = f"Predict Loss: {pred_metrics['loss']} | {rouge_desc})" logger.info(desc) # save final metrics in json if jax.process_index() == 0: rouge_metrics = {f"test_{metric_name}": value for metric_name, value in rouge_metrics.items()} path = os.path.join(training_args.output_dir, "test_results.json") with open(path, "w") as f: json.dump(rouge_metrics, f, indent=4, sort_keys=True) if __name__ == "__main__": main()
transformers/examples/flax/summarization/run_summarization_flax.py/0
{ "file_path": "transformers/examples/flax/summarization/run_summarization_flax.py", "repo_id": "transformers", "token_count": 18039 }
331